diff --git a/.gitignore b/.gitignore
index 672c2c0..956481f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,34 @@
-# Created by https://www.gitignore.io/api/ntellij,go,linux,osx,windows
+# Created by https://www.gitignore.io/api/intellij,go,linux,osx,windows
+
+### Intellij ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+.idea
+*.iml
+
+## File-based project format:
+*.ipr
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
-#!! ERROR: ntellij is undefined. Use list command to see defined gitignore types !!#
### Go ###
# Compiled Object files, Static and Dynamic libs (Shared Objects)
@@ -32,6 +59,7 @@ _testmain.go
### Linux ###
*~
+*.swp
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
@@ -91,3 +119,6 @@ $RECYCLE.BIN/
# Windows shortcuts
*.lnk
+build/
+bind/
+wiki
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
new file mode 100644
index 0000000..0da1b68
--- /dev/null
+++ b/Godeps/Godeps.json
@@ -0,0 +1,51 @@
+{
+ "ImportPath": "amuz.es/go/wiki",
+ "GoVersion": "go1.5",
+ "Deps": [
+ {
+ "ImportPath": "github.com/elazarl/go-bindata-assetfs",
+ "Rev": "57eb5e1fc594ad4b0b1dbea7b286d299e0cb43c2"
+ },
+ {
+ "ImportPath": "github.com/flosch/pongo2",
+ "Comment": "v1.0-rc1-182-ga269242",
+ "Rev": "a269242022ae534b052672d6a9326a40560a63e7"
+ },
+ {
+ "ImportPath": "github.com/gin-gonic/gin",
+ "Comment": "v1.0rc1-219-g3d002e3",
+ "Rev": "3d002e382355cafc15d706b92899b1961d5b79e9"
+ },
+ {
+ "ImportPath": "github.com/gin-gonic/gin/binding",
+ "Comment": "v1.0rc1-219-g3d002e3",
+ "Rev": "3d002e382355cafc15d706b92899b1961d5b79e9"
+ },
+ {
+ "ImportPath": "github.com/gin-gonic/gin/render",
+ "Comment": "v1.0rc1-219-g3d002e3",
+ "Rev": "3d002e382355cafc15d706b92899b1961d5b79e9"
+ },
+ {
+ "ImportPath": "github.com/golang/glog",
+ "Rev": "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
+ },
+ {
+ "ImportPath": "github.com/golang/protobuf/proto",
+ "Rev": "c75fbf01dc6cb73649c4cd4326182c3e44aa9dbb"
+ },
+ {
+ "ImportPath": "github.com/manucorporat/sse",
+ "Rev": "ee05b128a739a0fb76c7ebd3ae4810c1de808d6d"
+ },
+ {
+ "ImportPath": "golang.org/x/net/context",
+ "Rev": "08f168e593b5aab61849054b77981de812666697"
+ },
+ {
+ "ImportPath": "gopkg.in/go-playground/validator.v8",
+ "Comment": "v8.17.1",
+ "Rev": "014792cf3e266caff1e916876be12282b33059e0"
+ }
+ ]
+}
diff --git a/Godeps/Readme b/Godeps/Readme
new file mode 100644
index 0000000..4cdaa53
--- /dev/null
+++ b/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
new file mode 100644
index 0000000..f037d68
--- /dev/null
+++ b/Godeps/_workspace/.gitignore
@@ -0,0 +1,2 @@
+/pkg
+/bin
diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile
new file mode 100644
index 0000000..e33ee17
--- /dev/null
+++ b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+ gofmt -w *.go
+
+docs:
+ gomake clean
+ godoc ${TARG} > README.txt
diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+    Q float64
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
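+
+EXAMPLE
+
+An illustrative sketch (not part of the generated documentation; the
+media types shown are arbitrary):
+
+    header := "text/html;q=0.9, application/json"
+    best := goautoneg.Negotiate(header, []string{"application/json", "text/html"})
+    // best == "application/json": its implicit q of 1.0 outranks text/html's 0.9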
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 0000000..648b38c
--- /dev/null
+++ b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/.gitignore b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/.gitignore
new file mode 100644
index 0000000..df826e0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.out
diff --git a/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/.travis.yml b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/.travis.yml
new file mode 100644
index 0000000..1de94bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+go:
+ - 1.2
+ - 1.4
+ - tip
+before_install:
+ - go get github.com/axw/gocov/gocov
+ - go get github.com/mattn/goveralls
+ - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/LICENSE b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/LICENSE
new file mode 100644
index 0000000..902306b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 Sean Dolphin
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/README.md b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/README.md
new file mode 100644
index 0000000..0347366
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/README.md
@@ -0,0 +1,66 @@
+# BQSchema
+
+**Documentation:** [![GoDoc](https://godoc.org/github.com/SeanDolphin/bqschema?status.png)](http://godoc.org/github.com/SeanDolphin/bqschema)
+**Build Status:** [![Build Status](https://travis-ci.org/SeanDolphin/bqschema.svg?branch=master)](https://travis-ci.org/SeanDolphin/bqschema)
+**Test Coverage:** [![Coverage Status](https://coveralls.io/repos/SeanDolphin/bqschema/badge.svg)](https://coveralls.io/r/SeanDolphin/bqschema)
+
+
+BQSchema is a package used to create Google BigQuery schemas directly from Go structs and to import BigQuery QueryResponse results into arrays of Go structs.
+
+## Usage
+
+You can use BQSchema to automatically load Google BigQuery results into arrays of basic Go structs.
+
+~~~ go
+// main.go
+package main
+
+import (
+ "google.golang.org/api/bigquery/v2"
+ "github.com/SeanDolphin/bqschema"
+)
+
+type person struct{
+ Name string
+ Email string
+ Age int
+}
+
+func main() {
+ // authorize the bigquery service
+ // create a query
+ result, err := bq.Jobs.Query("projectID", query).Do()
+ if err == nil {
+ var people []person
+ err := bqschema.ToStructs(result, &people)
+ // do something with people
+ }
+}
+
+~~~
+
+You can also use BQSchema to create the schema fields when creating new BigQuery tables from basic Go structs.
+
+~~~ go
+// main.go
+package main
+
+import (
+ "google.golang.org/api/bigquery/v2"
+ "github.com/SeanDolphin/bqschema"
+)
+
+type person struct{
+ Name string
+ Email string
+ Age int
+}
+
+func main() {
+ // authorize the bigquery service
+ table, err := bq.Tables.Insert("projectID","datasetID", bigquery.Table{
+		Schema: bqschema.MustToSchema(person{}),
+ }).Do()
+}
+
+~~~
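+
+Given the type rules in this package, the `person` struct above maps to three
+required fields (a sketch of the derived schema, not generated output):
+
+~~~ go
+// Name  -> Type: "string",  Mode: "required"
+// Email -> Type: "string",  Mode: "required"
+// Age   -> Type: "integer", Mode: "required"
+~~~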
diff --git a/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/toSchema.go b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/toSchema.go
new file mode 100644
index 0000000..49a726a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/toSchema.go
@@ -0,0 +1,151 @@
+package bqschema
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+ "time"
+
+ "google.golang.org/api/bigquery/v2"
+)
+
+var (
+ ArrayOfArray = errors.New("Array of Arrays not allowed")
+ UnknownType = errors.New("Unknown type")
+ NotStruct = errors.New("Can not convert non structs")
+)
+
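+// ToSchema derives a BigQuery table schema from the exported fields of src,
+// which must be a struct. A `json` tag overrides the field name, fields tagged
+// `json:"-"` are skipped, slices and arrays map to repeated fields, and nested
+// structs map to records.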
+func ToSchema(src interface{}) (*bigquery.TableSchema, error) {
+ value := reflect.ValueOf(src)
+ t := value.Type()
+
+ schema := &bigquery.TableSchema{}
+
+ if t.Kind() == reflect.Struct {
+ schema.Fields = make([]*bigquery.TableFieldSchema, 0, t.NumField())
+ for i := 0; i < t.NumField(); i++ {
+ sf := t.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ v := pointerGuard(value.Field(i))
+
+ var name string
+ jsonTag := sf.Tag.Get("json")
+ switch jsonTag {
+ case "-":
+ continue
+ case "":
+ name = sf.Name
+ default:
+ name = strings.Split(jsonTag, ",")[0]
+ }
+
+ tfs := &bigquery.TableFieldSchema{
+ Mode: "required",
+ Name: name,
+ Type: "",
+ }
+ schema.Fields = append(schema.Fields, tfs)
+
+ kind := v.Kind()
+ t, isSimple := simpleType(kind)
+
+ if isSimple {
+ tfs.Type = t
+ } else {
+ switch kind {
+ case reflect.Struct:
+ tfs.Mode = "nullable"
+ if t, fields, err := structConversion(v.Interface()); err == nil {
+ tfs.Type = t
+ if t == "string" {
+ tfs.Mode = "required"
+ }
+ tfs.Fields = fields
+ } else {
+ return schema, err
+ }
+ case reflect.Array, reflect.Slice:
+ tfs.Mode = "repeated"
+ subKind := pointerGuard(v.Type().Elem()).Kind()
+ t, isSimple := simpleType(subKind)
+ if isSimple {
+					// use tfs, not schema.Fields[i]: skipped fields (unexported
+					// or json:"-") make i diverge from the slice index
+					tfs.Type = t
+ } else if subKind == reflect.Struct {
+ subStruct := reflect.Zero(pointerGuard(v.Type().Elem()).Type()).Interface()
+ if t, fields, err := structConversion(subStruct); err == nil {
+						tfs.Type = t
+						tfs.Fields = fields
+
+ } else {
+ return schema, err
+ }
+ } else {
+ return schema, ArrayOfArray
+ }
+ default:
+ return schema, UnknownType
+ }
+ }
+ }
+ } else {
+ return schema, NotStruct
+ }
+
+ return schema, nil
+}
+
+func MustToSchema(src interface{}) *bigquery.TableSchema {
+ schema, err := ToSchema(src)
+ if err != nil {
+ panic(err)
+ }
+ return schema
+}
+
+func simpleType(kind reflect.Kind) (string, bool) {
+ isSimple := true
+ t := ""
+ switch kind {
+ case reflect.Bool:
+ t = "boolean"
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ t = "integer"
+ case reflect.Float32, reflect.Float64:
+ t = "float"
+ case reflect.String:
+ t = "string"
+ default:
+ isSimple = false
+ }
+ return t, isSimple
+}
+
+func structConversion(src interface{}) (string, []*bigquery.TableFieldSchema, error) {
+ v := reflect.ValueOf(src)
+ if v.Type().Name() == "Key" && strings.Contains(v.Type().PkgPath(), "appengine") {
+ return "string", nil, nil
+ } else if v.Type().ConvertibleTo(reflect.TypeOf(time.Time{})) {
+ return "timestamp", nil, nil
+ } else {
+ schema, err := ToSchema(src)
+ return "record", schema.Fields, err
+ }
+}
+
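+// pointerGuard accepts either a reflect.Value or a reflect.Type and returns a
+// usable reflect.Value; pointer types are replaced with a zero value of their
+// element type, so *T fields can be inspected exactly like T fields.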
+func pointerGuard(i interface{}) reflect.Value {
+ var v reflect.Value
+ var ok bool
+ v, ok = i.(reflect.Value)
+ if !ok {
+ if t, ok := i.(reflect.Type); ok {
+ v = reflect.Indirect(reflect.New(t))
+ }
+ }
+
+ if v.Kind() == reflect.Ptr {
+ v = reflect.Indirect(reflect.New(v.Type().Elem()))
+ }
+ return v
+}
diff --git a/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/toStructs.go b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/toStructs.go
new file mode 100644
index 0000000..f6550e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/SeanDolphin/bqschema/toStructs.go
@@ -0,0 +1,76 @@
+package bqschema
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+
+ "google.golang.org/api/bigquery/v2"
+)
+
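+// ToStructs copies the rows of a BigQuery QueryResponse into dst, which must
+// be a pointer to a slice of structs. Columns are matched to struct fields by
+// case-insensitive name, and cell values are parsed according to field kind.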
+func ToStructs(result *bigquery.QueryResponse, dst interface{}) error {
+ var err error
+ value := reflect.Indirect(reflect.ValueOf(dst))
+
+ itemType := value.Type().Elem()
+ rowCount := len(result.Rows)
+
+ nameMap := map[string]string{}
+
+ for i := 0; i < itemType.NumField(); i++ {
+ field := itemType.Field(i)
+ nameMap[strings.ToLower(field.Name)] = field.Name
+ }
+
+ items := reflect.MakeSlice(value.Type(), rowCount, rowCount)
+ for i := 0; i < rowCount; i++ {
+ item := reflect.Indirect(reflect.New(itemType))
+ row := result.Rows[i]
+ for j, cell := range row.F {
+ schemaField := result.Schema.Fields[j]
+
+ if name, ok := nameMap[strings.ToLower(schemaField.Name)]; ok {
+ field := item.FieldByName(name)
+ if field.IsValid() {
+ switch field.Kind() {
+ case reflect.Float64, reflect.Float32:
+ f, err := strconv.ParseFloat(cell.V.(string), 64)
+ if err == nil {
+ field.SetFloat(f)
+ } else {
+ return err
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ i, err := strconv.ParseInt(cell.V.(string), 10, 64)
+ if err == nil {
+ field.SetInt(i)
+ } else {
+ return err
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ i, err := strconv.ParseUint(cell.V.(string), 10, 64)
+ if err == nil {
+ field.SetUint(i)
+ } else {
+ return err
+ }
+
+ case reflect.Bool:
+ b, err := strconv.ParseBool(cell.V.(string))
+ if err == nil {
+ field.SetBool(b)
+ } else {
+ return err
+ }
+ case reflect.String:
+ field.Set(reflect.ValueOf(cell.V))
+ }
+
+ }
+ }
+ }
+ items.Index(i).Set(item)
+ }
+ value.Set(items)
+ return err
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore
new file mode 100644
index 0000000..3591f9f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml b/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml
new file mode 100644
index 0000000..776514b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml
@@ -0,0 +1,33 @@
+language: go
+go:
+- 1.4.3
+- 1.5.2
+
+env:
+ global:
+ - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
+ - TOXIPROXY_ADDR=http://localhost:8474
+ - KAFKA_INSTALL_ROOT=/home/travis/kafka
+ - KAFKA_HOSTNAME=localhost
+ - DEBUG=true
+ matrix:
+ - KAFKA_VERSION=0.8.1.1
+ - KAFKA_VERSION=0.8.2.2
+ - KAFKA_VERSION=0.9.0.0
+
+before_install:
+- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
+- vagrant/install_cluster.sh
+- vagrant/boot_cluster.sh
+- vagrant/create_topics.sh
+
+install:
+- make install_dependencies
+
+script:
+- make test
+- make vet
+- make errcheck
+- make fmt
+
+sudo: false
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md b/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 0000000..fd96b84
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,216 @@
+# Changelog
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+ ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+ caveats:
+ - Protocol-layer support is mostly in place
+    ([#577](https://github.com/Shopify/sarama/pull/577)); however, Kafka 0.9
+    renamed some messages and fields, which we did not rename in order to
+    preserve API compatibility.
+ - The producer and consumer work against 0.9, but the offset manager does
+ not ([#573](https://github.com/Shopify/sarama/pull/573)).
+ - TLS support may or may not work
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+ when the TCP connection is left hanging
+ ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+ solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+ slightly more efficient, and much more precise in calculating batch sizes
+ when compression is used
+ ([#549](https://github.com/Shopify/sarama/pull/549),
+ [#550](https://github.com/Shopify/sarama/pull/550),
+ [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+ ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+ ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+ Kafka 0.8.2. The API is designed mainly for integration into a future
+ high-level consumer, not for direct use, although it is *possible* to use it
+ directly.
+ ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+ removing a major hotspot from most profiles
+ ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+ by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+ [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+ ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+ ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+ to change when Kafka releases built-in TLS support, but for now this is
+ enough to work with TLS-terminating proxies
+ ([#154](https://github.com/Shopify/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+ all other partitions will continue to consume normally
+ ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+ ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+ future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+ ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+ circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+ the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
+  - Update the import path for snappy-go; it has moved again and the API has
+ changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+  - Update the import path for snappy-go; it has moved from google code to github
+ ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+ ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+ ([#450](https://github.com/Shopify/sarama/pull/450),
+ [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+ users to dynamically choose what topics/partitions to consume without
+ instantiating a full client
+ ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+ by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+ partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+ ([#439](https://github.com/Shopify/sarama/pull/439),
+ [#442](https://github.com/Shopify/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+ useful, and slightly less verbose
+ ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+ thundering herd on the first broker in the list
+ ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+ shutting down, fixing several instances of confusing behaviour and at least
+ one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+ making it much more resilient to specific user code ordering
+ ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+ ConsumerMetadataRequests similar to how it tracks partition leadership using
+ regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+ This adds two methods to the client API:
+ - `Coordinator(consumerGroup string) (*Broker, error)`
+ - `RefreshCoordinator(consumerGroup string) error`
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+ ID/address/port combination for the Coordinator; accessing the fields
+ individually has been deprecated
+ ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+ Consumers will fail to start if the provided offset is out of range
+ ([#418](https://github.com/Shopify/sarama/pull/418))
+ and they will automatically shut down if the offset falls out of range
+ ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+ ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+ it happens to be activated while the client is being closed
+ ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+ ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+ messages due to an improved queue implementation
+ ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+ changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+ retry once in the event of stale information or similar
+ ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+ ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+ API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+ [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+ broken topics don't choke throughput
+ ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+ ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+ ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+ gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+    metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences from previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md
new file mode 100644
index 0000000..b0f107c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+Contributions are always welcome, both reporting issues and submitting pull requests!
+
+### Reporting issues
+
+Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
+
+- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please check whether the problem persists with the latest version.
+- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
+- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
+
+Also, please include the following information about your environment, so we can help you faster:
+
+- What version of Kafka are you using?
+- What version of Go are you using?
+- What are the values of your Producer/Consumer/Client configuration?
+
+
+### Submitting pull requests
+
+We will gladly accept bug fixes or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following.
+
+- If you plan to work on something major, please open an issue to discuss the design first.
+- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
+- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
+- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
+- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
+- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
+- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
+- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions.
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE b/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE
new file mode 100644
index 0000000..8121b63
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile b/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile
new file mode 100644
index 0000000..b76e97a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,24 @@
+default: fmt vet errcheck test
+
+test:
+ go test -v -timeout 60s -race ./...
+
+vet:
+ go vet ./...
+
+errcheck:
+ errcheck github.com/Shopify/sarama/...
+
+fmt:
+ @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+install_dependencies: install_errcheck install_go_vet get
+
+install_errcheck:
+ go get github.com/kisielk/errcheck
+
+install_go_vet:
+ go get golang.org/x/tools/cmd/vet
+
+get:
+ go get -t
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/README.md
new file mode 100644
index 0000000..63247b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/README.md
@@ -0,0 +1,36 @@
+sarama
+======
+
+[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
+[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+### Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+### Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. This means we currently officially support
+Go 1.4 and 1.5, and Kafka 0.8.1 and 0.8.2, although older releases are still
+likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
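+
+For instance, a version-pinned import under that scheme (illustrative only):
+
+```go
+import "gopkg.in/Shopify/sarama.v1"
+```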
+
+### Contributing
+
+* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md).
+* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
+ technical and design details.
+* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
+ contains a wealth of useful information.
+* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+* If you have any questions, just ask!
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile b/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 0000000..4586d9a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,19 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = "ubuntu/trusty64"
+
+ config.vm.provision :shell, path: "vagrant/provision.sh"
+
+ config.vm.network "private_network", ip: "192.168.100.67"
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = MEMORY
+ end
+end
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 0000000..44c0abf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,891 @@
+package sarama
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/eapache/go-resiliency/breaker"
+ "github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+ // AsyncClose triggers a shutdown of the producer, flushing any messages it may
+ // have buffered. The shutdown has completed when both the Errors and Successes
+ // channels have been closed. When calling AsyncClose, you *must* continue to
+ // read from those channels in order to drain the results of any messages in
+ // flight.
+ AsyncClose()
+
+ // Close shuts down the producer and flushes any messages it may have buffered.
+ // You must call this function before a producer object passes out of scope, as
+ // it may otherwise leak memory. You must call this before calling Close on the
+ // underlying client.
+ Close() error
+
+ // Input is the input channel for the user to write messages to that they
+ // wish to send.
+ Input() chan<- *ProducerMessage
+
+ // Successes is the success output channel back to the user when AckSuccesses is
+ // enabled. If Return.Successes is true, you MUST read from this channel or the
+ // Producer will deadlock. It is suggested that you send and read messages
+ // together in a single select statement.
+ Successes() <-chan *ProducerMessage
+
+ // Errors is the error output channel back to the user. You MUST read from this
+ // channel or the Producer will deadlock when the channel is full. Alternatively,
+ // you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+ Errors() <-chan *ProducerError
+}
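+
+// A minimal usage sketch (an illustration, not part of the package; the broker
+// address and topic below are assumptions):
+//
+//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.Close()
+//	msg := &ProducerMessage{Topic: "events", Value: StringEncoder("hello")}
+//	// Send and read in a single select, as the Successes comment suggests.
+//	select {
+//	case producer.Input() <- msg:
+//	case err := <-producer.Errors():
+//		Logger.Println(err)
+//	}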
+
+type asyncProducer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ errors chan *ProducerError
+ input, successes, retries chan *ProducerMessage
+ inFlight sync.WaitGroup
+
+ brokers map[*Broker]chan<- *ProducerMessage
+ brokerRefs map[chan<- *ProducerMessage]int
+ brokerLock sync.Mutex
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ p.(*asyncProducer).ownClient = true
+ return p, nil
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ p := &asyncProducer{
+ client: client,
+ conf: client.Config(),
+ errors: make(chan *ProducerError),
+ input: make(chan *ProducerMessage),
+ successes: make(chan *ProducerMessage),
+ retries: make(chan *ProducerMessage),
+ brokers: make(map[*Broker]chan<- *ProducerMessage),
+ brokerRefs: make(map[chan<- *ProducerMessage]int),
+ }
+
+ // launch our singleton dispatchers
+ go withRecover(p.dispatcher)
+ go withRecover(p.retryHandler)
+
+ return p, nil
+}
+
+type flagSet int8
+
+const (
+ syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+ fin // final message from partitionProducer to brokerProducer and back
+ shutdown // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+ Topic string // The Kafka topic for this message.
+ // The partitioning key for this message. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Key Encoder
+ // The actual message to store in Kafka. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Value Encoder
+
+ // This field is used to hold arbitrary data you wish to include so it
+ // will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only to be used for
+ // pass-through data.
+ Metadata interface{}
+
+ // Below this point are filled in by the producer as the message is processed
+
+ // Offset is the offset of the message stored on the broker. This is only
+ // guaranteed to be defined if the message was successfully delivered and
+ // RequiredAcks is not NoResponse.
+ Offset int64
+ // Partition is the partition that the message was sent to. This is only
+ // guaranteed to be defined if the message was successfully delivered.
+ Partition int32
+
+ retries int
+ flags flagSet
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize() int {
+ size := producerMessageOverhead
+ if m.Key != nil {
+ size += m.Key.Length()
+ }
+ if m.Value != nil {
+ size += m.Value.Length()
+ }
+ return size
+}
+
+func (m *ProducerMessage) clear() {
+ m.flags = 0
+ m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+ Msg *ProducerMessage
+ Err error
+}
+
+func (pe ProducerError) Error() string {
+ return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+ return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+ return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+ return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+ return p.input
+}
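+
+// Illustrative sketch (not part of upstream sarama): a typical produce loop
+// that feeds Input() while draining Successes() and Errors(). It assumes
+// Producer.Return.Successes was enabled in the Config; the topic is a
+// hypothetical placeholder.
+//
+//	for i := 0; i < 10; i++ {
+//		select {
+//		case producer.Input() <- &ProducerMessage{Topic: "my-topic", Value: StringEncoder("ping")}:
+//		case msg := <-producer.Successes():
+//			Logger.Println("delivered at offset", msg.Offset)
+//		case err := <-producer.Errors():
+//			Logger.Println("delivery failed:", err)
+//		}
+//	}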
+
+func (p *asyncProducer) Close() error {
+ p.AsyncClose()
+
+ if p.conf.Producer.Return.Successes {
+ go withRecover(func() {
+			for range p.successes {
+ }
+ })
+ }
+
+ var errors ProducerErrors
+ if p.conf.Producer.Return.Errors {
+ for event := range p.errors {
+ errors = append(errors, event)
+ }
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+ go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+ handlers := make(map[string]chan<- *ProducerMessage)
+ shuttingDown := false
+
+ for msg := range p.input {
+ if msg == nil {
+ Logger.Println("Something tried to send a nil message, it was ignored.")
+ continue
+ }
+
+ if msg.flags&shutdown != 0 {
+ shuttingDown = true
+ p.inFlight.Done()
+ continue
+ } else if msg.retries == 0 {
+ if shuttingDown {
+ // we can't just call returnError here because that decrements the wait group,
+ // which hasn't been incremented yet for this message, and shouldn't be
+ pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ continue
+ }
+ p.inFlight.Add(1)
+ }
+
+ if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
+ p.returnError(msg, ErrMessageSizeTooLarge)
+ continue
+ }
+
+ handler := handlers[msg.Topic]
+ if handler == nil {
+ handler = p.newTopicProducer(msg.Topic)
+ handlers[msg.Topic] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range handlers {
+ close(handler)
+ }
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+ parent *asyncProducer
+ topic string
+ input <-chan *ProducerMessage
+
+ breaker *breaker.Breaker
+ handlers map[int32]chan<- *ProducerMessage
+ partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ tp := &topicProducer{
+ parent: p,
+ topic: topic,
+ input: input,
+ breaker: breaker.New(3, 1, 10*time.Second),
+ handlers: make(map[int32]chan<- *ProducerMessage),
+ partitioner: p.conf.Producer.Partitioner(topic),
+ }
+ go withRecover(tp.dispatch)
+ return input
+}
+
+func (tp *topicProducer) dispatch() {
+ for msg := range tp.input {
+ if msg.retries == 0 {
+ if err := tp.partitionMessage(msg); err != nil {
+ tp.parent.returnError(msg, err)
+ continue
+ }
+ }
+
+ handler := tp.handlers[msg.Partition]
+ if handler == nil {
+ handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+ tp.handlers[msg.Partition] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range tp.handlers {
+ close(handler)
+ }
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+ var partitions []int32
+
+ err := tp.breaker.Run(func() (err error) {
+ if tp.partitioner.RequiresConsistency() {
+ partitions, err = tp.parent.client.Partitions(msg.Topic)
+ } else {
+ partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+ }
+ return
+ })
+
+ if err != nil {
+ return err
+ }
+
+ numPartitions := int32(len(partitions))
+
+ if numPartitions == 0 {
+ return ErrLeaderNotAvailable
+ }
+
+ choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+ if err != nil {
+ return err
+ } else if choice < 0 || choice >= numPartitions {
+ return ErrInvalidPartition
+ }
+
+ msg.Partition = partitions[choice]
+
+ return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+ parent *asyncProducer
+ topic string
+ partition int32
+ input <-chan *ProducerMessage
+
+ leader *Broker
+ breaker *breaker.Breaker
+ output chan<- *ProducerMessage
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush).
+ highWatermark int
+ retryState []partitionRetryState
+}
+
+type partitionRetryState struct {
+ buf []*ProducerMessage
+ expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ pp := &partitionProducer{
+ parent: p,
+ topic: topic,
+ partition: partition,
+ input: input,
+
+ breaker: breaker.New(3, 1, 10*time.Second),
+ retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+ }
+ go withRecover(pp.dispatch)
+ return input
+}
+
+func (pp *partitionProducer) dispatch() {
+ // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+ // on the first message
+ pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+ if pp.leader != nil {
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+ }
+
+ for msg := range pp.input {
+ if msg.retries > pp.highWatermark {
+ // a new, higher, retry level; handle it and then back off
+ pp.newHighWatermark(msg.retries)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ } else if pp.highWatermark > 0 {
+ // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+ if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+ if msg.flags&fin == fin {
+ pp.retryState[msg.retries].expectChaser = false
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ } else {
+ pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+ }
+ continue
+ } else if msg.flags&fin == fin {
+ // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+ // meaning this retry level is done and we can go down (at least) one level and flush that
+ pp.retryState[pp.highWatermark].expectChaser = false
+ pp.flushRetryBuffers()
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ continue
+ }
+ }
+
+ // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+ // without breaking any of our ordering guarantees
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnError(msg, err)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ continue
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ pp.output <- msg
+ }
+
+ if pp.output != nil {
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ }
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+ pp.highWatermark = hwm
+
+ // send off a fin so that we know when everything "in between" has made it
+ // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+ pp.retryState[pp.highWatermark].expectChaser = true
+ pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+ // a new HWM means that our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ pp.output = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+ Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ for {
+ pp.highWatermark--
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+ goto flushDone
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ for _, msg := range pp.retryState[pp.highWatermark].buf {
+ pp.output <- msg
+ }
+
+ flushDone:
+ pp.retryState[pp.highWatermark].buf = nil
+ if pp.retryState[pp.highWatermark].expectChaser {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ break
+ } else if pp.highWatermark == 0 {
+ Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+ break
+ }
+ }
+}
+
+func (pp *partitionProducer) updateLeader() error {
+ return pp.breaker.Run(func() (err error) {
+ if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+ return err
+ }
+
+ if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+ return err
+ }
+
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+ return nil
+ })
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ var (
+ input = make(chan *ProducerMessage)
+ bridge = make(chan *produceSet)
+ responses = make(chan *brokerProducerResponse)
+ )
+
+ bp := &brokerProducer{
+ parent: p,
+ broker: broker,
+ input: input,
+ output: bridge,
+ responses: responses,
+ buffer: newProduceSet(p),
+ currentRetries: make(map[string]map[int32]error),
+ }
+ go withRecover(bp.run)
+
+ // minimal bridge to make the network response `select`able
+ go withRecover(func() {
+ for set := range bridge {
+ request := set.buildRequest()
+
+ response, err := broker.Produce(request)
+
+ responses <- &brokerProducerResponse{
+ set: set,
+ err: err,
+ res: response,
+ }
+ }
+ close(responses)
+ })
+
+ return input
+}
+
+type brokerProducerResponse struct {
+ set *produceSet
+ err error
+ res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+ parent *asyncProducer
+ broker *Broker
+
+ input <-chan *ProducerMessage
+ output chan<- *produceSet
+ responses <-chan *brokerProducerResponse
+
+ buffer *produceSet
+ timer <-chan time.Time
+ timerFired bool
+
+ closing error
+ currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+ var output chan<- *produceSet
+ Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+ for {
+ select {
+ case msg := <-bp.input:
+ if msg == nil {
+ goto shutdown
+ }
+
+ if msg.flags&syn == syn {
+ Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ if bp.currentRetries[msg.Topic] == nil {
+ bp.currentRetries[msg.Topic] = make(map[int32]error)
+ }
+ bp.currentRetries[msg.Topic][msg.Partition] = nil
+ bp.parent.inFlight.Done()
+ continue
+ }
+
+ if reason := bp.needsRetry(msg); reason != nil {
+ bp.parent.retryMessage(msg, reason)
+
+ if bp.closing == nil && msg.flags&fin == fin {
+ // we were retrying this partition but we can start processing again
+ delete(bp.currentRetries[msg.Topic], msg.Partition)
+ Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ }
+
+ continue
+ }
+
+ if bp.buffer.wouldOverflow(msg) {
+ if err := bp.waitForSpace(msg); err != nil {
+ bp.parent.retryMessage(msg, err)
+ continue
+ }
+ }
+
+ if err := bp.buffer.add(msg); err != nil {
+ bp.parent.returnError(msg, err)
+ continue
+ }
+
+ if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+ bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+ }
+ case <-bp.timer:
+ bp.timerFired = true
+ case output <- bp.buffer:
+ bp.rollOver()
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ }
+
+ if bp.timerFired || bp.buffer.readyToFlush() {
+ output = bp.output
+ } else {
+ output = nil
+ }
+ }
+
+shutdown:
+ for !bp.buffer.empty() {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ }
+ }
+ close(bp.output)
+ for response := range bp.responses {
+ bp.handleResponse(response)
+ }
+
+ Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+ if bp.closing != nil {
+ return bp.closing
+ }
+
+ return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+ Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+ for {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ // handling a response can change our state, so re-check some things
+ if reason := bp.needsRetry(msg); reason != nil {
+ return reason
+ } else if !bp.buffer.wouldOverflow(msg) {
+ return nil
+ }
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ return nil
+ }
+ }
+}
+
+func (bp *brokerProducer) rollOver() {
+ bp.timer = nil
+ bp.timerFired = false
+ bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+ if response.err != nil {
+ bp.handleError(response.set, response.err)
+ } else {
+ bp.handleSuccess(response.set, response.res)
+ }
+
+ if bp.buffer.empty() {
+ bp.rollOver() // this can happen if the response invalidated our buffer
+ }
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+ // we iterate through the blocks in the request set, not the response, so that we notice
+ // if the response is missing a block completely
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ if response == nil {
+ // this only happens when RequiredAcks is NoResponse, so we have to assume success
+ bp.parent.returnSuccesses(msgs)
+ return
+ }
+
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ bp.parent.returnErrors(msgs, ErrIncompleteResponse)
+ return
+ }
+
+ switch block.Err {
+ // Success
+ case ErrNoError:
+ for i, msg := range msgs {
+ msg.Offset = block.Offset + int64(i)
+ }
+ bp.parent.returnSuccesses(msgs)
+ // Retriable errors
+ case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+ bp.broker.ID(), topic, partition, block.Err)
+ bp.currentRetries[topic][partition] = block.Err
+ bp.parent.retryMessages(msgs, block.Err)
+ bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+ // Other non-retriable errors
+ default:
+ bp.parent.returnErrors(msgs, block.Err)
+ }
+ })
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+ switch err.(type) {
+ case PacketEncodingError:
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.returnErrors(msgs, err)
+ })
+ default:
+ Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+ bp.parent.abandonBrokerConnection(bp.broker)
+ _ = bp.broker.Close()
+ bp.closing = err
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.rollOver()
+ }
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+ var msg *ProducerMessage
+ buf := queue.New()
+
+ for {
+ if buf.Length() == 0 {
+ msg = <-p.retries
+ } else {
+ select {
+ case msg = <-p.retries:
+ case p.input <- buf.Peek().(*ProducerMessage):
+ buf.Remove()
+ continue
+ }
+ }
+
+ if msg == nil {
+ return
+ }
+
+ buf.Add(msg)
+ }
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+ Logger.Println("Producer shutting down.")
+ p.inFlight.Add(1)
+ p.input <- &ProducerMessage{flags: shutdown}
+
+ p.inFlight.Wait()
+
+ if p.ownClient {
+ err := p.client.Close()
+ if err != nil {
+ Logger.Println("producer/shutdown failed to close the embedded client:", err)
+ }
+ }
+
+ close(p.input)
+ close(p.retries)
+ close(p.errors)
+ close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+ msg.clear()
+ pErr := &ProducerError{Msg: msg, Err: err}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.returnError(msg, err)
+ }
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+ for _, msg := range batch {
+ if p.conf.Producer.Return.Successes {
+ msg.clear()
+ p.successes <- msg
+ }
+ p.inFlight.Done()
+ }
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnError(msg, err)
+ } else {
+ msg.retries++
+ p.retries <- msg
+ }
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.retryMessage(msg, err)
+ }
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bp := p.brokers[broker]
+
+ if bp == nil {
+ bp = p.newBrokerProducer(broker)
+ p.brokers[broker] = bp
+ p.brokerRefs[bp] = 0
+ }
+
+ p.brokerRefs[bp]++
+
+ return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ p.brokerRefs[bp]--
+ if p.brokerRefs[bp] == 0 {
+ close(bp)
+ delete(p.brokerRefs, bp)
+
+ if p.brokers[broker] == bp {
+ delete(p.brokers, broker)
+ }
+ }
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ delete(p.brokers, broker)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go b/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go
new file mode 100644
index 0000000..46f06a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,395 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+ id int32
+ addr string
+
+ conf *Config
+ correlationID int32
+ conn net.Conn
+ connErr error
+ lock sync.Mutex
+ opened int32
+
+ responses chan responsePromise
+ done chan bool
+}
+
+type responsePromise struct {
+ correlationID int32
+ packets chan []byte
+ errors chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+ return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
+func (b *Broker) Open(conf *Config) error {
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ err := conf.Validate()
+ if err != nil {
+ return err
+ }
+
+ if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+ return ErrAlreadyConnected
+ }
+
+ b.lock.Lock()
+
+ if b.conn != nil {
+ b.lock.Unlock()
+ Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, ErrAlreadyConnected)
+ return ErrAlreadyConnected
+ }
+
+ go withRecover(func() {
+ defer b.lock.Unlock()
+
+ dialer := net.Dialer{
+ Timeout: conf.Net.DialTimeout,
+ KeepAlive: conf.Net.KeepAlive,
+ }
+
+ if conf.Net.TLS.Enable {
+ b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+ } else {
+ b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+ }
+ if b.connErr != nil {
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+ return
+ }
+
+ b.conf = conf
+ b.done = make(chan bool)
+ b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+ if b.id >= 0 {
+ Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+ } else {
+ Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+ }
+ go withRecover(b.responseReceiver)
+ })
+
+ return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ return b.conn != nil, b.connErr
+}
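+
+// Illustrative sketch (not part of upstream sarama): a fully synchronous open,
+// as described on Open above. The address is a hypothetical placeholder.
+//
+//	broker := NewBroker("localhost:9092")
+//	if err := broker.Open(nil); err != nil { // nil config falls back to NewConfig()
+//		panic(err)
+//	}
+//	connected, err := broker.Connected() // blocks until the dial has completed
+//	if err != nil || !connected {
+//		panic(err)
+//	}
+//	defer broker.Close()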
+
+func (b *Broker) Close() error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ return ErrNotConnected
+ }
+
+ close(b.responses)
+ <-b.done
+
+ err := b.conn.Close()
+
+ b.conn = nil
+ b.connErr = nil
+ b.done = nil
+ b.responses = nil
+
+ atomic.StoreInt32(&b.opened, 0)
+
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+
+ return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+ return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+ return b.addr
+}
+
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+ response := new(MetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+ response := new(ConsumerMetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+ response := new(OffsetResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+ var response *ProduceResponse
+ var err error
+
+ if request.RequiredAcks == NoResponse {
+ err = b.sendAndReceive(request, nil)
+ } else {
+ response = new(ProduceResponse)
+ err = b.sendAndReceive(request, response)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+ response := new(FetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+ response := new(OffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+ response := new(OffsetFetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ if b.connErr != nil {
+ return nil, b.connErr
+ }
+ return nil, ErrNotConnected
+ }
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req)
+ if err != nil {
+ return nil, err
+ }
+
+ err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = b.conn.Write(buf)
+ if err != nil {
+ return nil, err
+ }
+ b.correlationID++
+
+ if !promiseResponse {
+ return nil, nil
+ }
+
+ promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)}
+ b.responses <- promise
+
+ return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req requestBody, res decoder) error {
+ promise, err := b.send(req, res != nil)
+
+ if err != nil {
+ return err
+ }
+
+ if promise == nil {
+ return nil
+ }
+
+ select {
+ case buf := <-promise.packets:
+ return decode(buf, res)
+ case err = <-promise.errors:
+ return err
+ }
+}
+
+func (b *Broker) decode(pd packetDecoder) (err error) {
+ b.id, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ host, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ port, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+ if _, _, err := net.SplitHostPort(b.addr); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Broker) encode(pe packetEncoder) (err error) {
+ host, portstr, err := net.SplitHostPort(b.addr)
+ if err != nil {
+ return err
+ }
+ port, err := strconv.Atoi(portstr)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(b.id)
+
+ err = pe.putString(host)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(int32(port))
+
+ return nil
+}
+
+func (b *Broker) responseReceiver() {
+ var dead error
+ header := make([]byte, 8)
+ for response := range b.responses {
+ if dead != nil {
+ response.errors <- dead
+ continue
+ }
+
+ err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ _, err = io.ReadFull(b.conn, header)
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ decodedHeader := responseHeader{}
+ err = decode(header, &decodedHeader)
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+ if decodedHeader.correlationID != response.correlationID {
+ // TODO if decoded ID < cur ID, discard until we catch up
+ // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+ dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+ response.errors <- dead
+ continue
+ }
+
+ buf := make([]byte, decodedHeader.length-4)
+ _, err = io.ReadFull(b.conn, buf)
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ response.packets <- buf
+ }
+ close(b.done)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/client.go b/Godeps/_workspace/src/github.com/Shopify/sarama/client.go
new file mode 100644
index 0000000..c7aecc3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/client.go
@@ -0,0 +1,732 @@
+package sarama
+
+import (
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. A single client can be safely shared by
+// multiple concurrent Producers and Consumers.
+type Client interface {
+ // Config returns the Config struct of the client. This struct should not be
+ // altered after it has been created.
+ Config() *Config
+
+ // Topics returns the set of available topics as retrieved from cluster metadata.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ Partitions(topic string) ([]int32, error)
+
+ // WritablePartitions returns the sorted list of all writable partition IDs for
+ // the given topic, where "writable" means "having a valid leader accepting
+ // writes".
+ WritablePartitions(topic string) ([]int32, error)
+
+ // Leader returns the broker object that is the leader of the current
+ // topic/partition, as determined by querying the cluster metadata.
+ Leader(topic string, partitionID int32) (*Broker, error)
+
+ // Replicas returns the set of all replica IDs for the given partition.
+ Replicas(topic string, partitionID int32) ([]int32, error)
+
+ // RefreshMetadata takes a list of topics and queries the cluster to refresh the
+ // available metadata for those topics. If no topics are provided, it will refresh
+ // metadata for all topics.
+ RefreshMetadata(topics ...string) error
+
+ // GetOffset queries the cluster to get the most recent available offset at the
+ // given time on the topic/partition combination. Time should be OffsetOldest for
+ // the earliest available offset, OffsetNewest for the offset of the message that
+	// will be produced next, or a timestamp in milliseconds since the Unix epoch.
+ GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+ // Coordinator returns the coordinating broker for a consumer group. It will
+ // return a locally cached value if it's available. You can call
+ // RefreshCoordinator to update the cached value. This function only works on
+ // Kafka 0.8.2 and higher.
+ Coordinator(consumerGroup string) (*Broker, error)
+
+ // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+ // in local cache. This function only works on Kafka 0.8.2 and higher.
+ RefreshCoordinator(consumerGroup string) error
+
+ // Close shuts down all broker connections managed by this client. It is required
+ // to call this function before a client object passes out of scope, as it will
+ // otherwise leak memory. You must close any Producers or Consumers using a client
+ // before you close the client.
+ Close() error
+
+ // Closed returns true if the client has already had Close called on it
+ Closed() bool
+}
+
+const (
+ // OffsetNewest stands for the log head offset, i.e. the offset that will be
+ // assigned to the next message that will be produced to the partition. You
+ // can send this to a client's GetOffset method to get this offset, or when
+ // calling ConsumePartition to start consuming new messages.
+ OffsetNewest int64 = -1
+ // OffsetOldest stands for the oldest offset available on the broker for a
+ // partition. You can send this to a client's GetOffset method to get this
+ // offset, or when calling ConsumePartition to start consuming from the
+ // oldest offset that is still available on the broker.
+ OffsetOldest int64 = -2
+)
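+
+// Illustrative sketch (not part of upstream sarama): using the special offset
+// constants with a client's GetOffset. Topic and partition are hypothetical.
+//
+//	newest, err := client.GetOffset("my-topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	oldest, err := client.GetOffset("my-topic", 0, OffsetOldest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	Logger.Printf("partition holds offsets [%d, %d)", oldest, newest)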
+
+type client struct {
+ conf *Config
+ closer, closed chan none // for shutting down background metadata updater
+
+ // the broker addresses given to us through the constructor are not guaranteed to be returned in
+ // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+ // so we store them separately
+ seedBrokers []*Broker
+ deadSeeds []*Broker
+
+ brokers map[int32]*Broker // maps broker ids to brokers
+ metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+ coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
+
+ // If the number of partitions is large, we can get some churn calling cachedPartitions,
+ // so the result is cached. It is important to update this value whenever metadata is changed
+ cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+ lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+ Logger.Println("Initializing new client")
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ if err := conf.Validate(); err != nil {
+ return nil, err
+ }
+
+ if len(addrs) < 1 {
+ return nil, ConfigurationError("You must provide at least one broker address")
+ }
+
+ client := &client{
+ conf: conf,
+ closer: make(chan none),
+ closed: make(chan none),
+ brokers: make(map[int32]*Broker),
+ metadata: make(map[string]map[int32]*PartitionMetadata),
+ cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+ coordinators: make(map[string]int32),
+ }
+
+ random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for _, index := range random.Perm(len(addrs)) {
+ client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+ }
+
+	// do an initial fetch of all cluster metadata by specifying an empty list of topics
+ err := client.RefreshMetadata()
+ switch err {
+ case nil:
+ break
+ case ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+ // indicates that maybe part of the cluster is down, but is not fatal to creating the client
+ Logger.Println(err)
+ default:
+ close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+ _ = client.Close()
+ return nil, err
+ }
+ go withRecover(client.backgroundMetadataUpdater)
+
+ Logger.Println("Successfully initialized new client")
+
+ return client, nil
+}
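+
+// Illustrative sketch (not part of upstream sarama): creating and tearing down
+// a client. The broker addresses are hypothetical placeholders; passing a nil
+// config uses the NewConfig() defaults.
+//
+//	client, err := NewClient([]string{"kafka-1:9092", "kafka-2:9092"}, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer client.Close()
+//
+//	topics, err := client.Topics()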
+
+func (client *client) Config() *Config {
+ return client.conf
+}
+
+func (client *client) Close() error {
+ if client.Closed() {
+ // Chances are this is being called from a defer() and the error will go unobserved
+ // so we go ahead and log the event in this case.
+ Logger.Printf("Close() called on already closed client")
+ return ErrClosedClient
+ }
+
+ // shutdown and wait for the background thread before we take the lock, to avoid races
+ close(client.closer)
+ <-client.closed
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ Logger.Println("Closing Client")
+
+ for _, broker := range client.brokers {
+ safeAsyncClose(broker)
+ }
+
+ for _, broker := range client.seedBrokers {
+ safeAsyncClose(broker)
+ }
+
+ client.brokers = nil
+ client.metadata = nil
+
+ return nil
+}
+
+func (client *client) Closed() bool {
+ return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadata))
+ for topic := range client.metadata {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, allPartitions)
+
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, allPartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, writablePartitions)
+
+ // len==0 catches when it's nil (no such topic) and the odd case when every single
+ // partition is undergoing leader election simultaneously. Callers have to be able to handle
+ // this function returning an empty slice (which is a valid return value) but catching it
+ // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+ // a metadata refresh as a nicety so callers can just try again and don't have to manually
+ // trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, writablePartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return nil, metadata.Err
+ }
+ return dupeAndSort(metadata.Replicas), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ leader, err := client.cachedLeader(topic, partitionID)
+
+ if leader == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ leader, err = client.cachedLeader(topic, partitionID)
+ }
+
+ return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+	// Prior to 0.8.2, Kafka throws exceptions on an empty topic rather than
+	// returning a proper error. This handles that case by returning an error
+	// directly instead of sending the request off to Kafka.
+	// See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+ for _, topic := range topics {
+ if len(topic) == 0 {
+ return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+ }
+ }
+
+ return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+ if client.Closed() {
+ return -1, ErrClosedClient
+ }
+
+ offset, err := client.getOffset(topic, partitionID, time)
+
+ if err != nil {
+ if err := client.RefreshMetadata(topic); err != nil {
+ return -1, err
+ }
+ return client.getOffset(topic, partitionID, time)
+ }
+
+ return offset, err
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ coordinator := client.cachedCoordinator(consumerGroup)
+
+ if coordinator == nil {
+ if err := client.RefreshCoordinator(consumerGroup); err != nil {
+ return nil, err
+ }
+ coordinator = client.cachedCoordinator(consumerGroup)
+ }
+
+ if coordinator == nil {
+ return nil, ErrConsumerCoordinatorNotAvailable
+ }
+
+ _ = coordinator.Open(client.conf)
+ return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+ if err != nil {
+ return err
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ client.registerBroker(response.Coordinator)
+ client.coordinators[consumerGroup] = response.Coordinator.ID()
+ return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If a broker with the same ID is already registered under a different
+// address, the stale entry is replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+ if client.brokers[broker.ID()] == nil {
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+ } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+ safeAsyncClose(client.brokers[broker.ID()])
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+ }
+}
+
+// deregisterBroker removes a broker from the seed broker list, and if it is
+// not a seed broker, removes it from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+ client.deadSeeds = append(client.deadSeeds, broker)
+ client.seedBrokers = client.seedBrokers[1:]
+ } else {
+ // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+ // but we really shouldn't have to; once that loop is made better this case can be
+ // removed, and the function generally can be renamed from `deregisterBroker` to
+ // `nextSeedBroker` or something
+ Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+ delete(client.brokers, broker.ID())
+ }
+}
+
+func (client *client) resurrectDeadBrokers() {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+ client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+ client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ if len(client.seedBrokers) > 0 {
+ _ = client.seedBrokers[0].Open(client.conf)
+ return client.seedBrokers[0]
+ }
+
+ // not guaranteed to be random *or* deterministic
+ for _, broker := range client.brokers {
+ _ = broker.Open(client.conf)
+ return broker
+ }
+
+ return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+ allPartitions partitionType = iota
+ writablePartitions
+	// If you add any more types, update the partition cache in updateMetadata()
+
+ // Ensure this is the last partition type value
+ maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ return partitions[partitionID]
+ }
+
+ return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions, exists := client.cachedPartitionsResults[topic]
+
+ if !exists {
+ return nil
+ }
+ return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+ partitions := client.metadata[topic]
+
+ if partitions == nil {
+ return nil
+ }
+
+ ret := make([]int32, 0, len(partitions))
+ for _, partition := range partitions {
+ if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+ continue
+ }
+ ret = append(ret, partition.ID)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ metadata, ok := partitions[partitionID]
+ if ok {
+ if metadata.Err == ErrLeaderNotAvailable {
+ return nil, ErrLeaderNotAvailable
+ }
+ b := client.brokers[metadata.Leader]
+ if b == nil {
+ return nil, ErrLeaderNotAvailable
+ }
+ _ = b.Open(client.conf)
+ return b, nil
+ }
+ }
+
+ return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+ broker, err := client.Leader(topic, partitionID)
+ if err != nil {
+ return -1, err
+ }
+
+ request := &OffsetRequest{}
+ request.AddBlock(topic, partitionID, time, 1)
+
+ response, err := broker.GetAvailableOffsets(request)
+ if err != nil {
+ _ = broker.Close()
+ return -1, err
+ }
+
+ block := response.GetBlock(topic, partitionID)
+ if block == nil {
+ _ = broker.Close()
+ return -1, ErrIncompleteResponse
+ }
+ if block.Err != ErrNoError {
+ return -1, block.Err
+ }
+ if len(block.Offsets) != 1 {
+ return -1, ErrOffsetOutOfRange
+ }
+
+ return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+ defer close(client.closed)
+
+ if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+ return
+ }
+
+ ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := client.RefreshMetadata(); err != nil {
+ Logger.Println("Client background metadata update:", err)
+ }
+ case <-client.closer:
+ return
+ }
+ }
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
+ retry := func(err error) error {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.tryRefreshMetadata(topics, attemptsRemaining-1)
+ }
+ return err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ if len(topics) > 0 {
+ Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+ } else {
+ Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+ }
+ response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
+
+ switch err.(type) {
+ case nil:
+ // valid response, use it
+ if shouldRetry, err := client.updateMetadata(response); shouldRetry {
+ Logger.Println("client/metadata found some partitions to be leaderless")
+ return retry(err) // note: err can be nil
+ } else {
+ return err
+ }
+
+ case PacketEncodingError:
+ // didn't even send, return the error
+ return err
+ default:
+ // some other error, remove that broker and try again
+ Logger.Println("client/metadata got error from broker while fetching metadata:", err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ }
+ }
+
+ Logger.Println("client/metadata no available broker to send metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
+
+// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
+func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ // For all the brokers we received:
+ // - if it is a new ID, save it
+ // - if it is an existing ID, but the address we have is stale, discard the old one and save it
+ // - otherwise ignore it, replacing our existing one would just bounce the connection
+ for _, broker := range data.Brokers {
+ client.registerBroker(broker)
+ }
+
+ for _, topic := range data.Topics {
+ delete(client.metadata, topic.Name)
+ delete(client.cachedPartitionsResults, topic.Name)
+
+ switch topic.Err {
+ case ErrNoError:
+ break
+ case ErrInvalidTopic: // don't retry, don't store partial results
+ err = topic.Err
+ continue
+ case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+ err = topic.Err
+ retry = true
+ continue
+ case ErrLeaderNotAvailable: // retry, but store partial partition results
+ retry = true
+ break
+ default: // don't retry, don't store partial results
+ Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+ err = topic.Err
+ continue
+ }
+
+ client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+ for _, partition := range topic.Partitions {
+ client.metadata[topic.Name][partition.ID] = partition
+ if partition.Err == ErrLeaderNotAvailable {
+ retry = true
+ }
+ }
+
+ var partitionCache [maxPartitionIndex][]int32
+ partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+ partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+ client.cachedPartitionsResults[topic.Name] = partitionCache
+ }
+
+ return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+ return client.brokers[coordinatorID]
+ }
+ return nil
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
+ retry := func(err error) (*ConsumerMetadataResponse, error) {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+ }
+ return nil, err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+		Logger.Printf("client/coordinator requesting coordinator for consumer group %s from %s\n", consumerGroup, broker.Addr())
+
+ request := new(ConsumerMetadataRequest)
+ request.ConsumerGroup = consumerGroup
+
+ response, err := broker.GetConsumerMetadata(request)
+
+ if err != nil {
+ Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+ switch err.(type) {
+ case PacketEncodingError:
+ return nil, err
+ default:
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ continue
+ }
+ }
+
+ switch response.Err {
+ case ErrNoError:
+			Logger.Printf("client/coordinator coordinator for consumer group %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+ return response, nil
+
+ case ErrConsumerCoordinatorNotAvailable:
+ Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+ // This is very ugly, but this scenario will only happen once per cluster.
+ // The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+ if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+ Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+ time.Sleep(2 * time.Second)
+ }
+
+ return retry(ErrConsumerCoordinatorNotAvailable)
+ default:
+ return nil, response.Err
+ }
+ }
+
+ Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/config.go b/Godeps/_workspace/src/github.com/Shopify/sarama/config.go
new file mode 100644
index 0000000..542c611
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/config.go
@@ -0,0 +1,344 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "time"
+)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+ // Net is the namespace for network-level properties used by the Broker, and
+ // shared by the Client/Producer/Consumer.
+ Net struct {
+ // How many outstanding requests a connection is allowed to have before
+ // sending on it blocks (default 5).
+ MaxOpenRequests int
+
+ // All three of the below configurations are similar to the
+ // `socket.timeout.ms` setting in JVM kafka. All of them default
+ // to 30 seconds.
+ DialTimeout time.Duration // How long to wait for the initial connection.
+ ReadTimeout time.Duration // How long to wait for a response.
+ WriteTimeout time.Duration // How long to wait for a transmit.
+
+ // NOTE: these config values have no compatibility guarantees; they may
+ // change when Kafka releases its official TLS support in version 0.9.
+ TLS struct {
+ // Whether or not to use TLS when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The TLS configuration to use for secure connections if
+ // enabled (defaults to nil).
+ Config *tls.Config
+ }
+
+ // KeepAlive specifies the keep-alive period for an active network connection.
+ // If zero, keep-alives are disabled. (default is 0: disabled).
+ KeepAlive time.Duration
+ }
+
+ // Metadata is the namespace for metadata management properties used by the
+ // Client, and shared by the Producer/Consumer.
+ Metadata struct {
+ Retry struct {
+ // The total number of times to retry a metadata request when the
+ // cluster is in the middle of a leader election (default 3).
+ Max int
+ // How long to wait for leader election to occur before retrying
+ // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+ Backoff time.Duration
+ }
+ // How frequently to refresh the cluster metadata in the background.
+ // Defaults to 10 minutes. Set to 0 to disable. Similar to
+ // `topic.metadata.refresh.interval.ms` in the JVM version.
+ RefreshFrequency time.Duration
+ }
+
+ // Producer is the namespace for configuration related to producing messages,
+ // used by the Producer.
+ Producer struct {
+ // The maximum permitted size of a message (defaults to 1000000). Should be
+ // set equal to or smaller than the broker's `message.max.bytes`.
+ MaxMessageBytes int
+ // The level of acknowledgement reliability needed from the broker (defaults
+ // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+ // JVM producer.
+ RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+ // RequiredAcks (defaults to 10 seconds). This is only relevant when
+ // RequiredAcks is set to WaitForAll or a number > 1. Only supports
+ // millisecond resolution, nanoseconds will be truncated. Equivalent to
+ // the JVM producer's `request.timeout.ms` setting.
+ Timeout time.Duration
+ // The type of compression to use on messages (defaults to no compression).
+ // Similar to `compression.codec` setting of the JVM producer.
+ Compression CompressionCodec
+ // Generates partitioners for choosing the partition to send messages to
+ // (defaults to hashing the message key). Similar to the `partitioner.class`
+ // setting for the JVM producer.
+ Partitioner PartitionerConstructor
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from the respective channels to prevent deadlock.
+ Return struct {
+ // If enabled, successfully delivered messages will be returned on the
+ // Successes channel (default disabled).
+ Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including the error that caused the failure
+			// (default enabled).
+ }
+
+ // The following config options control how often messages are batched up and
+ // sent to the broker. By default, messages are sent as fast as possible, and
+ // all messages received while the current batch is in-flight are placed
+ // into the subsequent batch.
+ Flush struct {
+ // The best-effort number of bytes needed to trigger a flush. Use the
+ // global sarama.MaxRequestSize to set a hard upper limit.
+ Bytes int
+ // The best-effort number of messages needed to trigger a flush. Use
+ // `MaxMessages` to set a hard upper limit.
+ Messages int
+ // The best-effort frequency of flushes. Equivalent to
+ // `queue.buffering.max.ms` setting of JVM producer.
+ Frequency time.Duration
+ // The maximum number of messages the producer will send in a single
+ // broker request. Defaults to 0 for unlimited. Similar to
+ // `queue.buffering.max.messages` in the JVM producer.
+ MaxMessages int
+ }
+
+ Retry struct {
+ // The total number of times to retry sending a message (default 3).
+ // Similar to the `message.send.max.retries` setting of the JVM producer.
+ Max int
+ // How long to wait for the cluster to settle between retries
+ // (default 100ms). Similar to the `retry.backoff.ms` setting of the
+ // JVM producer.
+ Backoff time.Duration
+ }
+ }
+
+ // Consumer is the namespace for configuration related to consuming messages,
+ // used by the Consumer.
+ Consumer struct {
+ Retry struct {
+			// How long to wait after failing to read from a partition before
+ // trying again (default 2s).
+ Backoff time.Duration
+ }
+
+ // Fetch is the namespace for controlling how many bytes are retrieved by any
+ // given request.
+ Fetch struct {
+ // The minimum number of message bytes to fetch in a request - the broker
+ // will wait until at least this many are available. The default is 1,
+ // as 0 causes the consumer to spin when no messages are available.
+ // Equivalent to the JVM's `fetch.min.bytes`.
+ Min int32
+ // The default number of message bytes to fetch from the broker in each
+ // request (default 32768). This should be larger than the majority of
+ // your messages, or else the consumer will spend a lot of time
+ // negotiating sizes and not actually consuming. Similar to the JVM's
+ // `fetch.message.max.bytes`.
+ Default int32
+ // The maximum number of message bytes to fetch from the broker in a
+ // single request. Messages larger than this will return
+ // ErrMessageTooLarge and will not be consumable, so you must be sure
+ // this is at least as large as your largest message. Defaults to 0
+ // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+ // global `sarama.MaxResponseSize` still applies.
+ Max int32
+ }
+ // The maximum amount of time the broker will wait for Consumer.Fetch.Min
+ // bytes to become available before it returns fewer than that anyway. The
+ // default is 250ms, since 0 causes the consumer to spin when no events are
+ // available. 100-500ms is a reasonable range for most cases. Kafka only
+ // supports precision up to milliseconds; nanoseconds will be truncated.
+ // Equivalent to the JVM's `fetch.wait.max.ms`.
+ MaxWaitTime time.Duration
+
+ // The maximum amount of time the consumer expects a message to take to
+ // process for the user. If writing to the Messages channel takes longer than this,
+ // that partition will stop fetching more messages until it can proceed again.
+ // Note that, since the Messages channel is buffered, the actual grace time is
+ // (MaxProcessingTime * ChannelBufferSize); with the defaults (100ms * 256)
+ // that is about 25.6 seconds. Defaults to 100ms.
+ MaxProcessingTime time.Duration
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from them to prevent deadlock.
+ Return struct {
+ // If enabled, any errors that occurred while consuming are returned on
+ // the Errors channel (default disabled).
+ Errors bool
+ }
+
+ // Offsets specifies configuration for how and when to commit consumed
+ // offsets. This currently requires the manual use of an OffsetManager
+ // but will eventually be automated.
+ Offsets struct {
+ // How frequently to commit updated offsets. Defaults to 1s.
+ CommitInterval time.Duration
+
+ // The initial offset to use if no offset was previously committed.
+ // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+ Initial int64
+ }
+ }
+
+ // A user-provided string sent with every request to the brokers for logging,
+ // debugging, and auditing purposes. Defaults to "sarama", but you should
+ // probably set it to something specific to your application.
+ ClientID string
+ // The number of events to buffer in internal and external channels. This
+ // permits the producer and consumer to continue processing some messages
+ // in the background while user code is working, greatly improving throughput.
+ // Defaults to 256.
+ ChannelBufferSize int
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+ c := &Config{}
+
+ c.Net.MaxOpenRequests = 5
+ c.Net.DialTimeout = 30 * time.Second
+ c.Net.ReadTimeout = 30 * time.Second
+ c.Net.WriteTimeout = 30 * time.Second
+
+ c.Metadata.Retry.Max = 3
+ c.Metadata.Retry.Backoff = 250 * time.Millisecond
+ c.Metadata.RefreshFrequency = 10 * time.Minute
+
+ c.Producer.MaxMessageBytes = 1000000
+ c.Producer.RequiredAcks = WaitForLocal
+ c.Producer.Timeout = 10 * time.Second
+ c.Producer.Partitioner = NewHashPartitioner
+ c.Producer.Retry.Max = 3
+ c.Producer.Retry.Backoff = 100 * time.Millisecond
+ c.Producer.Return.Errors = true
+
+ c.Consumer.Fetch.Min = 1
+ c.Consumer.Fetch.Default = 32768
+ c.Consumer.Retry.Backoff = 2 * time.Second
+ c.Consumer.MaxWaitTime = 250 * time.Millisecond
+ c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+ c.Consumer.Return.Errors = false
+ c.Consumer.Offsets.CommitInterval = 1 * time.Second
+ c.Consumer.Offsets.Initial = OffsetNewest
+
+ c.ChannelBufferSize = 256
+
+ return c
+}
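+
+// A minimal usage sketch (illustrative, not part of the API): start from
+// NewConfig so the defaults above are in place, override only what you need,
+// and let Validate catch mistakes before handing the config to a constructor.
+// The ClientID value is a placeholder.
+//
+// config := NewConfig()
+// config.ClientID = "my-app"
+// config.Producer.RequiredAcks = WaitForAll
+// config.Producer.Flush.Frequency = 500 * time.Millisecond
+// config.Consumer.Return.Errors = true
+// if err := config.Validate(); err != nil {
+//  panic(err) // a ConfigurationError naming the offending value
+// }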
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+ // some configuration values warrant a warning but should not fail validation; check those first
+ if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+ Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+ }
+ if c.Producer.RequiredAcks > 1 {
+ Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+ }
+ if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.")
+ }
+ if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.")
+ }
+ if c.Producer.Timeout%time.Millisecond != 0 {
+ Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+ }
+ if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+ Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+ }
+ if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+ Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.ClientID == "sarama" {
+ Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+ }
+
+ // validate Net values
+ switch {
+ case c.Net.MaxOpenRequests <= 0:
+ return ConfigurationError("Net.MaxOpenRequests must be > 0")
+ case c.Net.DialTimeout <= 0:
+ return ConfigurationError("Net.DialTimeout must be > 0")
+ case c.Net.ReadTimeout <= 0:
+ return ConfigurationError("Net.ReadTimeout must be > 0")
+ case c.Net.WriteTimeout <= 0:
+ return ConfigurationError("Net.WriteTimeout must be > 0")
+ case c.Net.KeepAlive < 0:
+ return ConfigurationError("Net.KeepAlive must be >= 0")
+ }
+
+ // validate the Metadata values
+ switch {
+ case c.Metadata.Retry.Max < 0:
+ return ConfigurationError("Metadata.Retry.Max must be >= 0")
+ case c.Metadata.Retry.Backoff < 0:
+ return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+ case c.Metadata.RefreshFrequency < 0:
+ return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+ }
+
+ // validate the Producer values
+ switch {
+ case c.Producer.MaxMessageBytes <= 0:
+ return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+ case c.Producer.RequiredAcks < -1:
+ return ConfigurationError("Producer.RequiredAcks must be >= -1")
+ case c.Producer.Timeout <= 0:
+ return ConfigurationError("Producer.Timeout must be > 0")
+ case c.Producer.Partitioner == nil:
+ return ConfigurationError("Producer.Partitioner must not be nil")
+ case c.Producer.Flush.Bytes < 0:
+ return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+ case c.Producer.Flush.Messages < 0:
+ return ConfigurationError("Producer.Flush.Messages must be >= 0")
+ case c.Producer.Flush.Frequency < 0:
+ return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+ case c.Producer.Flush.MaxMessages < 0:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+ case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+ case c.Producer.Retry.Max < 0:
+ return ConfigurationError("Producer.Retry.Max must be >= 0")
+ case c.Producer.Retry.Backoff < 0:
+ return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+ }
+
+ // validate the Consumer values
+ switch {
+ case c.Consumer.Fetch.Min <= 0:
+ return ConfigurationError("Consumer.Fetch.Min must be > 0")
+ case c.Consumer.Fetch.Default <= 0:
+ return ConfigurationError("Consumer.Fetch.Default must be > 0")
+ case c.Consumer.Fetch.Max < 0:
+ return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+ case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+ return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+ case c.Consumer.MaxProcessingTime <= 0:
+ return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+ case c.Consumer.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+ case c.Consumer.Offsets.CommitInterval <= 0:
+ return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+ case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+ return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+ }
+
+ // validate misc shared values
+ switch {
+ case c.ChannelBufferSize < 0:
+ return ConfigurationError("ChannelBufferSize must be >= 0")
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 0000000..877fb04
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,690 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+ Key, Value []byte
+ Topic string
+ Partition int32
+ Offset int64
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+ Topic string
+ Partition int32
+ Err error
+}
+
+func (ce ConsumerError) Error() string {
+ return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close method to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+ return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer group rebalancing and offset
+// tracking; however, the https://github.com/wvanbergen/kafka library builds on Sarama to add this support. We plan
+// to properly integrate this functionality at a later date.
+type Consumer interface {
+
+ // Topics returns the set of available topics as retrieved from the cluster
+ // metadata. This method is the same as Client.Topics(), and is provided for
+ // convenience.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ // This method is the same as Client.Partitions(), and is provided for convenience.
+ Partitions(topic string) ([]int32, error)
+
+ // ConsumePartition creates a PartitionConsumer on the given topic/partition with
+ // the given offset. It will return an error if this Consumer is already consuming
+ // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+ // or OffsetOldest.
+ ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+ // Close shuts down the consumer. It must be called after all child
+ // PartitionConsumers have already been closed.
+ Close() error
+}
+
+type consumer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ lock sync.Mutex
+ children map[string]map[int32]*partitionConsumer
+ brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := NewConsumerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ c.(*consumer).ownClient = true
+ return c, nil
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ c := &consumer{
+ client: client,
+ conf: client.Config(),
+ children: make(map[string]map[int32]*partitionConsumer),
+ brokerConsumers: make(map[*Broker]*brokerConsumer),
+ }
+
+ return c, nil
+}
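+
+// A client-sharing sketch (the broker address is a placeholder): when the
+// same Client is reused by other components, the consumer does not own it,
+// so the client must be closed last. Defers run LIFO, so deferring the
+// client's Close first makes it run after the consumer's.
+//
+// client, err := NewClient([]string{"localhost:9092"}, NewConfig())
+// if err != nil {
+//  panic(err)
+// }
+// defer client.Close() // runs after consumer.Close()
+// consumer, err := NewConsumerFromClient(client)
+// if err != nil {
+//  panic(err)
+// }
+// defer consumer.Close()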
+
+func (c *consumer) Close() error {
+ if c.ownClient {
+ return c.client.Close()
+ }
+ return nil
+}
+
+func (c *consumer) Topics() ([]string, error) {
+ return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+ return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+ child := &partitionConsumer{
+ consumer: c,
+ conf: c.conf,
+ topic: topic,
+ partition: partition,
+ messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+ errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
+ feeder: make(chan *FetchResponse, 1),
+ trigger: make(chan none, 1),
+ dying: make(chan none),
+ fetchSize: c.conf.Consumer.Fetch.Default,
+ }
+
+ if err := child.chooseStartingOffset(offset); err != nil {
+ return nil, err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+ return nil, err
+ }
+
+ if err := c.addChild(child); err != nil {
+ return nil, err
+ }
+
+ go withRecover(child.dispatcher)
+ go withRecover(child.responseFeeder)
+
+ child.broker = c.refBrokerConsumer(leader)
+ child.broker.input <- child
+
+ return child, nil
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ topicChildren := c.children[child.topic]
+ if topicChildren == nil {
+ topicChildren = make(map[int32]*partitionConsumer)
+ c.children[child.topic] = topicChildren
+ }
+
+ if topicChildren[child.partition] != nil {
+ return ConfigurationError("That topic/partition is already being consumed")
+ }
+
+ topicChildren[child.partition] = child
+ return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ bc := c.brokerConsumers[broker]
+ if bc == nil {
+ bc = c.newBrokerConsumer(broker)
+ c.brokerConsumers[broker] = bc
+ }
+
+ bc.refs++
+
+ return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ brokerWorker.refs--
+
+ if brokerWorker.refs == 0 {
+ close(brokerWorker.input)
+ if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+ delete(c.brokerConsumers, brokerWorker.broker)
+ }
+ }
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
+// or AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically
+// when it passes out of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc.) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+type PartitionConsumer interface {
+
+ // AsyncClose initiates a shutdown of the PartitionConsumer. This method will
+ // return immediately, after which you should wait until the 'messages' and
+ // 'errors' channel are drained. It is required to call this function, or
+ // Close before a consumer object passes out of scope, as it will otherwise
+ // leak memory. You must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionConsumer from fetching messages. It is required to
+ // call this function (or AsyncClose) before a consumer object passes out of
+ // scope, as it will otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker.
+ Messages() <-chan *ConsumerMessage
+
+ // Errors returns a read channel of errors that occurred during consuming, if
+ // enabled. By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // HighWaterMarkOffset returns the high water mark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+}
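+
+// A minimal consumption sketch (assumptions: a reachable broker at
+// localhost:9092 and an existing topic "my_topic"; both are placeholders):
+// consume partition 0 from the oldest offset and range over Messages.
+//
+// consumer, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
+// if err != nil {
+//  panic(err)
+// }
+// defer consumer.Close()
+//
+// pc, err := consumer.ConsumePartition("my_topic", 0, OffsetOldest)
+// if err != nil {
+//  panic(err)
+// }
+// defer pc.Close()
+//
+// for msg := range pc.Messages() {
+//  lag := pc.HighWaterMarkOffset() - msg.Offset - 1 // messages still on the broker
+//  fmt.Printf("offset %d (lag %d): %s\n", msg.Offset, lag, msg.Value)
+// }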
+
+type partitionConsumer struct {
+ consumer *consumer
+ conf *Config
+ topic string
+ partition int32
+
+ broker *brokerConsumer
+ messages chan *ConsumerMessage
+ errors chan *ConsumerError
+ feeder chan *FetchResponse
+
+ trigger, dying chan none
+ responseResult error
+
+ fetchSize int32
+ offset int64
+ highWaterMarkOffset int64
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+ cErr := &ConsumerError{
+ Topic: child.topic,
+ Partition: child.partition,
+ Err: err,
+ }
+
+ if child.conf.Consumer.Return.Errors {
+ child.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (child *partitionConsumer) dispatcher() {
+ for range child.trigger {
+ select {
+ case <-child.dying:
+ close(child.trigger)
+ case <-time.After(child.conf.Consumer.Retry.Backoff):
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ child.broker = nil
+ }
+
+ Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+ if err := child.dispatch(); err != nil {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+ }
+
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ }
+ child.consumer.removeChild(child)
+ close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+ if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+ return err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+ return err
+ }
+
+ child.broker = child.consumer.refBrokerConsumer(leader)
+
+ child.broker.input <- child
+
+ return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+ newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+ if err != nil {
+ return err
+ }
+ oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case offset == OffsetNewest:
+ child.offset = newestOffset
+ case offset == OffsetOldest:
+ child.offset = oldestOffset
+ case offset >= oldestOffset && offset <= newestOffset:
+ child.offset = offset
+ default:
+ return ErrOffsetOutOfRange
+ }
+
+ return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+ return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+ return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+ // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+ // the dispatcher to exit its loop, which removes it from the consumer, then closes its 'messages' and
+ // 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, it will
+ // also just close itself)
+ close(child.dying)
+}
+
+func (child *partitionConsumer) Close() error {
+ child.AsyncClose()
+
+ go withRecover(func() {
+ for range child.messages {
+ // drain
+ }
+ })
+
+ var errors ConsumerErrors
+ for err := range child.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+ return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
+
+func (child *partitionConsumer) responseFeeder() {
+ var msgs []*ConsumerMessage
+
+feederLoop:
+ for response := range child.feeder {
+ msgs, child.responseResult = child.parseResponse(response)
+
+ for i, msg := range msgs {
+ select {
+ case child.messages <- msg:
+ case <-time.After(child.conf.Consumer.MaxProcessingTime):
+ child.responseResult = errTimedOut
+ child.broker.acks.Done()
+ for _, msg = range msgs[i:] {
+ child.messages <- msg
+ }
+ child.broker.input <- child
+ continue feederLoop
+ }
+ }
+
+ child.broker.acks.Done()
+ }
+
+ close(child.messages)
+ close(child.errors)
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+ block := response.GetBlock(child.topic, child.partition)
+ if block == nil {
+ return nil, ErrIncompleteResponse
+ }
+
+ if block.Err != ErrNoError {
+ return nil, block.Err
+ }
+
+ if len(block.MsgSet.Messages) == 0 {
+ // We got no messages. If we got a partial trailing message, we need to ask for more data.
+ // Otherwise we just poll again and wait for one to be produced...
+ if block.MsgSet.PartialTrailingMessage {
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+ // we can't ask for more data, we've hit the configured limit
+ child.sendError(ErrMessageTooLarge)
+ child.offset++ // skip this one so we can keep processing future messages
+ } else {
+ child.fetchSize *= 2
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+ child.fetchSize = child.conf.Consumer.Fetch.Max
+ }
+ }
+ }
+
+ return nil, nil
+ }
+
+ // we got messages, reset our fetch size in case it was increased for a previous request
+ child.fetchSize = child.conf.Consumer.Fetch.Default
+ atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+ incomplete := false
+ prelude := true
+ var messages []*ConsumerMessage
+ for _, msgBlock := range block.MsgSet.Messages {
+
+ for _, msg := range msgBlock.Messages() {
+ if prelude && msg.Offset < child.offset {
+ continue
+ }
+ prelude = false
+
+ if msg.Offset >= child.offset {
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: msg.Msg.Key,
+ Value: msg.Msg.Value,
+ Offset: msg.Offset,
+ })
+ child.offset = msg.Offset + 1
+ } else {
+ incomplete = true
+ }
+ }
+
+ }
+
+ if incomplete || len(messages) == 0 {
+ return nil, ErrIncompleteResponse
+ }
+ return messages, nil
+}
+
+// brokerConsumer
+
+type brokerConsumer struct {
+ consumer *consumer
+ broker *Broker
+ input chan *partitionConsumer
+ newSubscriptions chan []*partitionConsumer
+ wait chan none
+ subscriptions map[*partitionConsumer]none
+ acks sync.WaitGroup
+ refs int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+ bc := &brokerConsumer{
+ consumer: c,
+ broker: broker,
+ input: make(chan *partitionConsumer),
+ newSubscriptions: make(chan []*partitionConsumer),
+ wait: make(chan none),
+ subscriptions: make(map[*partitionConsumer]none),
+ refs: 0,
+ }
+
+ go withRecover(bc.subscriptionManager)
+ go withRecover(bc.subscriptionConsumer)
+
+ return bc
+}
+
+func (bc *brokerConsumer) subscriptionManager() {
+ var buffer []*partitionConsumer
+
+ // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+ // goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+ // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+ // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+ // so the main goroutine can block waiting for work if it has none.
+ for {
+ if len(buffer) > 0 {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- buffer:
+ buffer = nil
+ case bc.wait <- none{}:
+ }
+ } else {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- nil:
+ }
+ }
+ }
+
+done:
+ close(bc.wait)
+ if len(buffer) > 0 {
+ bc.newSubscriptions <- buffer
+ }
+ close(bc.newSubscriptions)
+}
+
+func (bc *brokerConsumer) subscriptionConsumer() {
+ <-bc.wait // wait for our first piece of work
+
+ // the subscriptionManager ensures we will get nil right away if no new subscriptions are available
+ for newSubscriptions := range bc.newSubscriptions {
+ bc.updateSubscriptions(newSubscriptions)
+
+ if len(bc.subscriptions) == 0 {
+ // We're about to be shut down or we're about to receive more subscriptions.
+ // Either way, the signal just hasn't propagated to our goroutine yet.
+ <-bc.wait
+ continue
+ }
+
+ response, err := bc.fetchNewMessages()
+
+ if err != nil {
+ Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+ bc.abort(err)
+ return
+ }
+
+ bc.acks.Add(len(bc.subscriptions))
+ for child := range bc.subscriptions {
+ child.feeder <- response
+ }
+ bc.acks.Wait()
+ bc.handleResponses()
+ }
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+ for _, child := range newSubscriptions {
+ bc.subscriptions[child] = none{}
+ Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ }
+
+ for child := range bc.subscriptions {
+ select {
+ case <-child.dying:
+ Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ default:
+ }
+ }
+}
+
+func (bc *brokerConsumer) handleResponses() {
+ // handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+ for child := range bc.subscriptions {
+ result := child.responseResult
+ child.responseResult = nil
+
+ switch result {
+ case nil:
+ case errTimedOut:
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+ bc.broker.ID(), child.topic, child.partition)
+ delete(bc.subscriptions, child)
+ case ErrOffsetOutOfRange:
+ // there's no point in retrying this; it will just fail the same way again.
+ // Shut it down and force the user to choose what to do.
+ child.sendError(result)
+ Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable:
+ // not an error, but does need redispatching
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ default:
+ // unknown error; tell the user and try redispatching
+ child.sendError(result)
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ }
+ }
+}
+
+func (bc *brokerConsumer) abort(err error) {
+ bc.consumer.abandonBrokerConsumer(bc)
+ _ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+ for child := range bc.subscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+
+ for newSubscription := range bc.newSubscriptions {
+ for _, child := range newSubscription {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+ request := &FetchRequest{
+ MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
+ MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+ }
+
+ for child := range bc.subscriptions {
+ request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+ }
+
+ return bc.broker.Fetch(request)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..9b8fcd7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,22 @@
+package sarama
+
+type ConsumerMetadataRequest struct {
+ ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+ return pe.putString(r.ConsumerGroup)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder) (err error) {
+ r.ConsumerGroup, err = pd.getString()
+ return err
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+ return 0
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..d6b5614
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,73 @@
+package sarama
+
+import (
+ "net"
+ "strconv"
+)
+
+type ConsumerMetadataResponse struct {
+ Err KError
+ Coordinator *Broker
+ CoordinatorID int32 // deprecated: use Coordinator.ID()
+ CoordinatorHost string // deprecated: use Coordinator.Addr()
+ CoordinatorPort int32 // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(tmp)
+
+ coordinator := new(Broker)
+ if err := coordinator.decode(pd); err != nil {
+ return err
+ }
+ if coordinator.addr == ":0" {
+ return nil
+ }
+ r.Coordinator = coordinator
+
+ // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+ // backwards compatibility
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ r.CoordinatorID = r.Coordinator.ID()
+ r.CoordinatorHost = host
+ r.CoordinatorPort = int32(port)
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if r.Coordinator != nil {
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ pe.putInt32(r.Coordinator.ID())
+ if err := pe.putString(host); err != nil {
+ return err
+ }
+ pe.putInt32(int32(port))
+ return nil
+ }
+ pe.putInt32(r.CoordinatorID)
+ if err := pe.putString(r.CoordinatorHost); err != nil {
+ return err
+ }
+ pe.putInt32(r.CoordinatorPort)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go b/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 0000000..5c28607
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,36 @@
+package sarama
+
+import (
+ "encoding/binary"
+
+ "github.com/klauspost/crc32"
+)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+ startOffset int
+}
+
+func (c *crc32Field) saveOffset(in int) {
+ c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+ return 4
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+ crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+ binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+ return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+ crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
+
+ if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
+ return PacketDecodingError{"CRC didn't match"}
+ }
+
+ return nil
+}
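+
+// A standalone sketch of the reserve-then-backfill pattern above (using the
+// standard library's hash/crc32, which exposes the same ChecksumIEEE as the
+// vendored fork): four bytes are reserved for the checksum, the payload is
+// written after them, and the CRC of the payload is backfilled at the start.
+//
+// payload := []byte("message body")
+// buf := make([]byte, 4+len(payload))
+// copy(buf[4:], payload)
+// binary.BigEndian.PutUint32(buf, crc32.ChecksumIEEE(buf[4:]))
+// // check() recomputes ChecksumIEEE over buf[4:] and compares it against
+// // binary.BigEndian.Uint32(buf), failing with PacketDecodingError on mismatch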
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/describe_groups_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 0000000..c9426a6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,26 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+ Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+ return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder) (err error) {
+ r.Groups, err = pd.getStringArray()
+ return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
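+
+// A hedged usage sketch: the Broker type in this vendored tree is assumed to
+// expose a DescribeGroups method that sends this request; the group name is
+// a placeholder.
+//
+// request := new(DescribeGroupsRequest)
+// request.AddGroup("my-consumer-group")
+// response, err := broker.DescribeGroups(request) // broker: a connected *Broker
+// if err != nil {
+//  panic(err)
+// }
+// for _, group := range response.Groups {
+//  fmt.Println(group.GroupId, group.State)
+// }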
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/describe_groups_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 0000000..b4b32dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,162 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+ Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+
+ for _, groupDescription := range r.Groups {
+ if err := groupDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Groups = make([]*GroupDescription, n)
+ for i := 0; i < n; i++ {
+ r.Groups[i] = new(GroupDescription)
+ if err := r.Groups[i].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type GroupDescription struct {
+ Err KError
+ GroupId string
+ State string
+ ProtocolType string
+ Protocol string
+ Members map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+ pe.putInt16(int16(gd.Err))
+
+ if err := pe.putString(gd.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.State); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.ProtocolType); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.Protocol); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(gd.Members)); err != nil {
+ return err
+ }
+
+ for memberId, groupMemberDescription := range gd.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := groupMemberDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ gd.Err = KError(kerr)
+
+ if gd.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.State, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.Protocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ gd.Members = make(map[string]*GroupMemberDescription)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ gd.Members[memberId] = new(GroupMemberDescription)
+ if err := gd.Members[memberId].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type GroupMemberDescription struct {
+ ClientId string
+ ClientHost string
+ MemberMetadata []byte
+ MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+ if err := pe.putString(gmd.ClientId); err != nil {
+ return err
+ }
+ if err := pe.putString(gmd.ClientHost); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+ if gmd.ClientId, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.ClientHost, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+ return
+ }
+ if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 0000000..b91efaa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,62 @@
+package sarama
+
+import "fmt"
+
+// Encoder is the interface that wraps the basic Encode method.
+// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+ encode(pe packetEncoder) error
+}
+
+// Encode takes an Encoder and turns it into bytes.
+func encode(e encoder) ([]byte, error) {
+ if e == nil {
+ return nil, nil
+ }
+
+ var prepEnc prepEncoder
+ var realEnc realEncoder
+
+ err := e.encode(&prepEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+ return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+ }
+
+ realEnc.raw = make([]byte, prepEnc.length)
+ err = e.encode(&realEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ return realEnc.raw, nil
+}
+
+// Decoder is the interface that wraps the basic Decode method.
+// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+ decode(pd packetDecoder) error
+}
+
+// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
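+
+// A round-trip sketch (package-internal, since encode and decode are
+// unexported): the first encode pass only measures the length, the second
+// writes into an exactly-sized buffer, and decode must consume every byte.
+// The group name is a placeholder.
+//
+// req := &ConsumerMetadataRequest{ConsumerGroup: "my-group"}
+// buf, err := encode(req)
+// if err != nil {
+//  panic(err)
+// }
+// decoded := new(ConsumerMetadataRequest)
+// if err := decode(buf, decoded); err != nil {
+//  panic(err) // PacketDecodingError on trailing or missing bytes
+// }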
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go b/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go
new file mode 100644
index 0000000..a837087
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,179 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+ Info string
+}
+
+func (err PacketEncodingError) Error() string {
+ return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+ Info string
+}
+
+func (err PacketDecodingError) Error() string {
+ return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+ return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+ ErrNoError KError = 0
+ ErrUnknown KError = -1
+ ErrOffsetOutOfRange KError = 1
+ ErrInvalidMessage KError = 2
+ ErrUnknownTopicOrPartition KError = 3
+ ErrInvalidMessageSize KError = 4
+ ErrLeaderNotAvailable KError = 5
+ ErrNotLeaderForPartition KError = 6
+ ErrRequestTimedOut KError = 7
+ ErrBrokerNotAvailable KError = 8
+ ErrReplicaNotAvailable KError = 9
+ ErrMessageSizeTooLarge KError = 10
+ ErrStaleControllerEpochCode KError = 11
+ ErrOffsetMetadataTooLarge KError = 12
+ ErrOffsetsLoadInProgress KError = 14
+ ErrConsumerCoordinatorNotAvailable KError = 15
+ ErrNotCoordinatorForConsumer KError = 16
+ ErrInvalidTopic KError = 17
+ ErrMessageSetSizeTooLarge KError = 18
+ ErrNotEnoughReplicas KError = 19
+ ErrNotEnoughReplicasAfterAppend KError = 20
+ ErrInvalidRequiredAcks KError = 21
+ ErrIllegalGeneration KError = 22
+ ErrInconsistentGroupProtocol KError = 23
+ ErrInvalidGroupId KError = 24
+ ErrUnknownMemberId KError = 25
+ ErrInvalidSessionTimeout KError = 26
+ ErrRebalanceInProgress KError = 27
+ ErrInvalidCommitOffsetSize KError = 28
+ ErrTopicAuthorizationFailed KError = 29
+ ErrGroupAuthorizationFailed KError = 30
+ ErrClusterAuthorizationFailed KError = 31
+)
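+
+// A minimal sketch of reacting to these codes (the cases are KError values
+// from the block above; err is assumed to be a KError pulled out of a
+// response, e.g. a fetch block's Err field):
+//
+// switch err {
+// case ErrNoError:
+//  // success, nothing to do
+// case ErrNotLeaderForPartition, ErrLeaderNotAvailable:
+//  // stale metadata: refresh and retry against the new leader
+// case ErrOffsetOutOfRange:
+//  // pick a new starting offset (OffsetOldest or OffsetNewest)
+// default:
+//  // unexpected: surface err.Error() to the operator
+// }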
+
+func (err KError) Error() string {
+ // Error messages stolen/adapted from
+ // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+ switch err {
+ case ErrNoError:
+ return "kafka server: Not an error, why are you printing me?"
+ case ErrUnknown:
+ return "kafka server: Unexpected (unknown?) server error."
+ case ErrOffsetOutOfRange:
+ return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+ case ErrInvalidMessage:
+ return "kafka server: Message contents does not match its CRC."
+ case ErrUnknownTopicOrPartition:
+ return "kafka server: Request was for a topic or partition that does not exist on this broker."
+ case ErrInvalidMessageSize:
+ return "kafka server: The message has a negative size."
+ case ErrLeaderNotAvailable:
+ return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+ case ErrNotLeaderForPartition:
+ return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+ case ErrRequestTimedOut:
+ return "kafka server: Request exceeded the user-specified time limit in the request."
+ case ErrBrokerNotAvailable:
+ return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+ case ErrReplicaNotAvailable:
+ return "kafka server: Replica infomation not available, one or more brokers are down."
+ case ErrMessageSizeTooLarge:
+ return "kafka server: Message was too large, server rejected it to avoid allocation error."
+ case ErrStaleControllerEpochCode:
+ return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+ case ErrOffsetMetadataTooLarge:
+ return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+ case ErrOffsetsLoadInProgress:
+ return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+ case ErrConsumerCoordinatorNotAvailable:
+ return "kafka server: Offset's topic has not yet been created."
+ case ErrNotCoordinatorForConsumer:
+ return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+ case ErrInvalidTopic:
+ return "kafka server: The request attempted to perform an operation on an invalid topic."
+ case ErrMessageSetSizeTooLarge:
+ return "kafka server: The request included message batch larger than the configured segment size on the server."
+ case ErrNotEnoughReplicas:
+ return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+ case ErrNotEnoughReplicasAfterAppend:
+ return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+ case ErrInvalidRequiredAcks:
+ return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+ case ErrIllegalGeneration:
+ return "kafka server: The provided generation id is not the current generation."
+ case ErrInconsistentGroupProtocol:
+ return "kafka server: The provider group protocol type is incompatible with the other members."
+ case ErrInvalidGroupId:
+ return "kafka server: The provided group id was empty."
+ case ErrUnknownMemberId:
+ return "kafka server: The provided member is not known in the current generation."
+ case ErrInvalidSessionTimeout:
+ return "kafka server: The provided session timeout is outside the allowed range."
+ case ErrRebalanceInProgress:
+ return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+ case ErrInvalidCommitOffsetSize:
+ return "kafka server: The provided commit metadata was too large."
+ case ErrTopicAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this topic."
+ case ErrGroupAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this group."
+ case ErrClusterAuthorizationFailed:
+ return "kafka server: The client is not authorized to send this request type."
+ }
+
+ return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md
new file mode 100644
index 0000000..b658805
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md
@@ -0,0 +1,9 @@
+# Sarama examples
+
+This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama).
+
+In these examples, we use `github.com/Shopify/sarama` as the import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
+
+#### HTTP server
+
+[http_server](./http_server) is a simple HTTP server that uses the sync producer to produce data as part of the request handling cycle, and the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore
new file mode 100644
index 0000000..9f6ed42
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore
@@ -0,0 +1,2 @@
+http_server
+http_server.test
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md
new file mode 100644
index 0000000..5ff2bc2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md
@@ -0,0 +1,7 @@
+# HTTP server example
+
+This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it also sends an access log entry to Kafka in the background.
+
+If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget: you can send the HTTP response while the message is being produced in the background.
+
+One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together.
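+
+The difference in one sketch (the producer variables and payloads are illustrative, not from this example):
+
+```go
+// SyncProducer blocks until the broker responds, so the HTTP handler can
+// report failure before answering the client.
+partition, offset, err := syncProducer.SendMessage(&sarama.ProducerMessage{
+	Topic: "important",
+	Value: sarama.StringEncoder("payload"),
+})
+
+// AsyncProducer fires and forgets; failures show up later on Errors().
+asyncProducer.Input() <- &sarama.ProducerMessage{
+	Topic: "access_log",
+	Value: sarama.StringEncoder("payload"),
+}
+```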
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go
new file mode 100644
index 0000000..03e47b6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go
@@ -0,0 +1,246 @@
+package main
+
+import (
+ "github.com/Shopify/sarama"
+
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+)
+
+var (
+ addr = flag.String("addr", ":8080", "The address to bind to")
+ brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
+ verbose = flag.Bool("verbose", false, "Turn on Sarama logging")
+ certFile = flag.String("certificate", "", "The optional certificate file for client authentication")
+ keyFile = flag.String("key", "", "The optional key file for client authentication")
+ caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
+ verifySsl = flag.Bool("verify", false, "Optional: verify the SSL certificate chain")
+)
+
+func main() {
+ flag.Parse()
+
+ if *verbose {
+ sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
+ }
+
+ if *brokers == "" {
+ flag.PrintDefaults()
+ os.Exit(1)
+ }
+
+ brokerList := strings.Split(*brokers, ",")
+ log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
+
+ server := &Server{
+ DataCollector: newDataCollector(brokerList),
+ AccessLogProducer: newAccessLogProducer(brokerList),
+ }
+ defer func() {
+ if err := server.Close(); err != nil {
+ log.Println("Failed to close server", err)
+ }
+ }()
+
+ log.Fatal(server.Run(*addr))
+}
+
+func createTlsConfiguration() (t *tls.Config) {
+ if *certFile != "" && *keyFile != "" && *caFile != "" {
+ cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ caCert, err := ioutil.ReadFile(*caFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ caCertPool := x509.NewCertPool()
+ caCertPool.AppendCertsFromPEM(caCert)
+
+ t = &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: caCertPool,
+ InsecureSkipVerify: !*verifySsl, // -verify opts in to verification, so skip only when it is off
+ }
+ }
+ // will be nil by default if nothing is provided
+ return t
+}
+
+type Server struct {
+ DataCollector sarama.SyncProducer
+ AccessLogProducer sarama.AsyncProducer
+}
+
+func (s *Server) Close() error {
+ if err := s.DataCollector.Close(); err != nil {
+ log.Println("Failed to shut down data collector cleanly", err)
+ }
+
+ if err := s.AccessLogProducer.Close(); err != nil {
+ log.Println("Failed to shut down access log producer cleanly", err)
+ }
+
+ return nil
+}
+
+func (s *Server) Handler() http.Handler {
+ return s.withAccessLog(s.collectQueryStringData())
+}
+
+func (s *Server) Run(addr string) error {
+ httpServer := &http.Server{
+ Addr: addr,
+ Handler: s.Handler(),
+ }
+
+ log.Printf("Listening for requests on %s...\n", addr)
+ return httpServer.ListenAndServe()
+}
+
+func (s *Server) collectQueryStringData() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ // We are not setting a message key, which means that all messages will
+ // be distributed randomly over the different partitions.
+ partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
+ Topic: "important",
+ Value: sarama.StringEncoder(r.URL.RawQuery),
+ })
+
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "Failed to store your data:, %s", err)
+ } else {
+ // The tuple (topic, partition, offset) can be used as a unique identifier
+ // for a message in a Kafka cluster.
+ fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
+ }
+ })
+}
+
+type accessLogEntry struct {
+ Method string `json:"method"`
+ Host string `json:"host"`
+ Path string `json:"path"`
+ IP string `json:"ip"`
+ ResponseTime float64 `json:"response_time"`
+
+ encoded []byte
+ err error
+}
+
+func (ale *accessLogEntry) ensureEncoded() {
+ if ale.encoded == nil && ale.err == nil {
+ ale.encoded, ale.err = json.Marshal(ale)
+ }
+}
+
+func (ale *accessLogEntry) Length() int {
+ ale.ensureEncoded()
+ return len(ale.encoded)
+}
+
+func (ale *accessLogEntry) Encode() ([]byte, error) {
+ ale.ensureEncoded()
+ return ale.encoded, ale.err
+}
+
+func (s *Server) withAccessLog(next http.Handler) http.Handler {
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ started := time.Now()
+
+ next.ServeHTTP(w, r)
+
+ entry := &accessLogEntry{
+ Method: r.Method,
+ Host: r.Host,
+ Path: r.RequestURI,
+ IP: r.RemoteAddr,
+ ResponseTime: float64(time.Since(started)) / float64(time.Second),
+ }
+
+ // We will use the client's IP address as key. This will cause
+ // all the access log entries of the same IP address to end up
+ // on the same partition.
+ s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
+ Topic: "access_log",
+ Key: sarama.StringEncoder(r.RemoteAddr),
+ Value: entry,
+ }
+ })
+}
+
+func newDataCollector(brokerList []string) sarama.SyncProducer {
+
+ // For the data collector, we are looking for strong consistency semantics.
+ // Because we don't change the flush settings, sarama will try to produce messages
+ // as fast as possible to keep latency low.
+ config := sarama.NewConfig()
+ config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
+ config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
+ tlsConfig := createTlsConfiguration()
+ if tlsConfig != nil {
+ config.Net.TLS.Config = tlsConfig
+ config.Net.TLS.Enable = true
+ }
+
+ // On the broker side, you may want to change the following settings to get
+ // stronger consistency guarantees:
+ // - For your broker, set `unclean.leader.election.enable` to false
+ // - For the topic, you could increase `min.insync.replicas`.
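+ // For example (illustrative values; these are Kafka broker/topic configuration
+ // properties, not part of this Go API):
+ //   unclean.leader.election.enable=false
+ //   min.insync.replicas=2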
+
+ producer, err := sarama.NewSyncProducer(brokerList, config)
+ if err != nil {
+ log.Fatalln("Failed to start Sarama producer:", err)
+ }
+
+ return producer
+}
+
+func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
+
+ // For the access log, we are looking for AP semantics, with high throughput.
+// By creating batches of compressed messages, we reduce network I/O at the cost of more latency.
+ config := sarama.NewConfig()
+ tlsConfig := createTlsConfiguration()
+ if tlsConfig != nil {
+ config.Net.TLS.Enable = true
+ config.Net.TLS.Config = tlsConfig
+ }
+ config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack
+ config.Producer.Compression = sarama.CompressionSnappy // Compress messages
+ config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
+
+ producer, err := sarama.NewAsyncProducer(brokerList, config)
+ if err != nil {
+ log.Fatalln("Failed to start Sarama producer:", err)
+ }
+
+ // We will just log to STDOUT if we're not able to produce messages.
+ // Note: messages will only be returned here after all retry attempts are exhausted.
+ go func() {
+ for err := range producer.Errors() {
+ log.Println("Failed to write access log entry:", err)
+ }
+ }()
+
+ return producer
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 0000000..3c00fad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,123 @@
+package sarama
+
+type fetchRequestBlock struct {
+ fetchOffset int64
+ maxBytes int32
+}
+
+func (f *fetchRequestBlock) encode(pe packetEncoder) error {
+ pe.putInt64(f.fetchOffset)
+ pe.putInt32(f.maxBytes)
+ return nil
+}
+
+func (f *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+ if f.fetchOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if f.maxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type FetchRequest struct {
+ MaxWaitTime int32
+ MinBytes int32
+ blocks map[string]map[int32]*fetchRequestBlock
+}
+
+func (f *FetchRequest) encode(pe packetEncoder) (err error) {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ pe.putInt32(f.MaxWaitTime)
+ pe.putInt32(f.MinBytes)
+ err = pe.putArrayLength(len(f.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, blocks := range f.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(blocks))
+ if err != nil {
+ return err
+ }
+ for partition, block := range blocks {
+ pe.putInt32(partition)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (f *FetchRequest) decode(pd packetDecoder) (err error) {
+ if _, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if f.MaxWaitTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if f.MinBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ f.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ f.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ fetchBlock := &fetchRequestBlock{}
+ if err = fetchBlock.decode(pd); err != nil {
+ return err
+ }
+ f.blocks[topic][partition] = fetchBlock
+ }
+ }
+ return nil
+}
+
+func (f *FetchRequest) key() int16 {
+ return 1
+}
+
+func (f *FetchRequest) version() int16 {
+ return 0
+}
+
+func (f *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+ if f.blocks == nil {
+ f.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ }
+
+ if f.blocks[topic] == nil {
+ f.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ }
+
+ tmp := new(fetchRequestBlock)
+ tmp.maxBytes = maxBytes
+ tmp.fetchOffset = fetchOffset
+
+ f.blocks[topic][partitionID] = tmp
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 0000000..1ac5439
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,173 @@
+package sarama
+
+type FetchResponseBlock struct {
+ Err KError
+ HighWaterMarkOffset int64
+ MsgSet MessageSet
+}
+
+func (pr *FetchResponseBlock) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ pr.Err = KError(tmp)
+
+ pr.HighWaterMarkOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ msgSetSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ msgSetDecoder, err := pd.getSubset(int(msgSetSize))
+ if err != nil {
+ return err
+ }
+ err = (&pr.MsgSet).decode(msgSetDecoder)
+
+ return err
+}
+
+type FetchResponse struct {
+ Blocks map[string]map[int32]*FetchResponseBlock
+}
+
+func (pr *FetchResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(pr.Err))
+
+ pe.putInt64(pr.HighWaterMarkOffset)
+
+ pe.push(&lengthField{})
+ err = pr.MsgSet.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (fr *FetchResponse) decode(pd packetDecoder) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ fr.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ fr.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(FetchResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ fr.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (fr *FetchResponse) encode(pe packetEncoder) (err error) {
+ err = pe.putArrayLength(len(fr.Blocks))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range fr.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+
+ for id, block := range partitions {
+ pe.putInt32(id)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+ return nil
+}
+
+func (fr *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+ if fr.Blocks == nil {
+ return nil
+ }
+
+ if fr.Blocks[topic] == nil {
+ return nil
+ }
+
+ return fr.Blocks[topic][partition]
+}
+
+func (fr *FetchResponse) AddError(topic string, partition int32, err KError) {
+ if fr.Blocks == nil {
+ fr.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := fr.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ fr.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ frb.Err = err
+}
+
+func (fr *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+ if fr.Blocks == nil {
+ fr.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := fr.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ fr.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ var kb []byte
+ var vb []byte
+ if key != nil {
+ kb, _ = key.Encode()
+ }
+ if value != nil {
+ vb, _ = value.Encode()
+ }
+ msg := &Message{Key: kb, Value: vb}
+ msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+ frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/heartbeat_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 0000000..b89d290
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,43 @@
+package sarama
+
+type HeartbeatRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+ return 0
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/heartbeat_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 0000000..b48b8c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,20 @@
+package sarama
+
+type HeartbeatResponse struct {
+ Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder) error {
+ if kerr, err := pd.getInt16(); err != nil {
+ return err
+ } else {
+ r.Err = KError(kerr)
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/join_group_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 0000000..8bb5ce8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,94 @@
+package sarama
+
+type JoinGroupRequest struct {
+ GroupId string
+ SessionTimeout int32
+ MemberId string
+ ProtocolType string
+ GroupProtocols map[string][]byte
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ pe.putInt32(r.SessionTimeout)
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.ProtocolType); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+ return err
+ }
+ for name, metadata := range r.GroupProtocols {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(metadata); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.SessionTimeout, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupProtocols = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ metadata, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.GroupProtocols[name] = metadata
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+ if r.GroupProtocols == nil {
+ r.GroupProtocols = make(map[string][]byte)
+ }
+
+ r.GroupProtocols[name] = metadata
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/join_group_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 0000000..037a9cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,90 @@
+package sarama
+
+type JoinGroupResponse struct {
+ Err KError
+ GenerationId int32
+ GroupProtocol string
+ LeaderId string
+ MemberId string
+ Members map[string][]byte
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.GroupProtocol); err != nil {
+ return err
+ }
+ if err := pe.putString(r.LeaderId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+
+ for memberId, memberMetadata := range r.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(memberMetadata); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder) (err error) {
+ if kerr, err := pd.getInt16(); err != nil {
+ return err
+ } else {
+ r.Err = KError(kerr)
+ }
+
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.GroupProtocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.LeaderId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Members = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ memberMetadata, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.Members[memberId] = memberMetadata
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/leave_group_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 0000000..cdb4d14
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,36 @@
+package sarama
+
+type LeaveGroupRequest struct {
+ GroupId string
+ MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+ return 0
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/leave_group_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 0000000..bad1dba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,20 @@
+package sarama
+
+type LeaveGroupResponse struct {
+ Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder) (err error) {
+ if kerr, err := pd.getInt16(); err != nil {
+ return err
+ } else {
+ r.Err = KError(kerr)
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go b/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 0000000..70078be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,29 @@
+package sarama
+
+import "encoding/binary"
+
+// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+ startOffset int
+}
+
+func (l *lengthField) saveOffset(in int) {
+ l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+ return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+ binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+ return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+ if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
+ return PacketDecodingError{"length field invalid"}
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/list_groups_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 0000000..4d74c26
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,20 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder) (err error) {
+ return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+ return 0
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/list_groups_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 0000000..2f53149
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,56 @@
+package sarama
+
+type ListGroupsResponse struct {
+ Err KError
+ Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+ for groupId, protocolType := range r.Groups {
+ if err := pe.putString(groupId); err != nil {
+ return err
+ }
+ if err := pe.putString(protocolType); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder) error {
+ if kerr, err := pd.getInt16(); err != nil {
+ return err
+ } else {
+ r.Err = KError(kerr)
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Groups = make(map[string]string)
+ for i := 0; i < n; i++ {
+ groupId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ protocolType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.Groups[groupId] = protocolType
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message.go
new file mode 100644
index 0000000..c4bdb9e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/message.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+// only the last two bits are really used
+const compressionCodecMask int8 = 0x03
+
+const (
+ CompressionNone CompressionCodec = 0
+ CompressionGZIP CompressionCodec = 1
+ CompressionSnappy CompressionCodec = 2
+)
+
+// The spec just says: "This is a version id used to allow backwards compatible evolution of the message
+// binary format." but it doesn't say what the current value is, so presumably 0...
+const messageFormat int8 = 0
+
+type Message struct {
+ Codec CompressionCodec // codec used to compress the message contents
+ Key []byte // the message key, may be nil
+ Value []byte // the message contents
+ Set *MessageSet // the message set a message might wrap
+
+ compressedCache []byte
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+ pe.push(&crc32Field{})
+
+ pe.putInt8(messageFormat)
+
+ attributes := int8(m.Codec) & compressionCodecMask
+ pe.putInt8(attributes)
+
+ err := pe.putBytes(m.Key)
+ if err != nil {
+ return err
+ }
+
+ var payload []byte
+
+ if m.compressedCache != nil {
+ payload = m.compressedCache
+ m.compressedCache = nil
+ } else {
+ switch m.Codec {
+ case CompressionNone:
+ payload = m.Value
+ case CompressionGZIP:
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ if _, err = writer.Write(m.Value); err != nil {
+ return err
+ }
+ if err = writer.Close(); err != nil {
+ return err
+ }
+ m.compressedCache = buf.Bytes()
+ payload = m.compressedCache
+ case CompressionSnappy:
+ tmp := snappyEncode(m.Value)
+ m.compressedCache = tmp
+ payload = m.compressedCache
+ default:
+ return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
+ }
+ }
+
+ if err = pe.putBytes(payload); err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+ err = pd.push(&crc32Field{})
+ if err != nil {
+ return err
+ }
+
+ format, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ if format != messageFormat {
+ return PacketDecodingError{"unexpected messageFormat"}
+ }
+
+ attribute, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ m.Codec = CompressionCodec(attribute & compressionCodecMask)
+
+ m.Key, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ m.Value, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ switch m.Codec {
+ case CompressionNone:
+ // nothing to do
+ case CompressionGZIP:
+ if m.Value == nil {
+ return PacketDecodingError{"GZIP compression specified, but no data to uncompress"}
+ }
+ reader, err := gzip.NewReader(bytes.NewReader(m.Value))
+ if err != nil {
+ return err
+ }
+ if m.Value, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ case CompressionSnappy:
+ if m.Value == nil {
+ return PacketDecodingError{"Snappy compression specified, but no data to uncompress"}
+ }
+ if m.Value, err = snappyDecode(m.Value); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ default:
+ return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
+ }
+
+ return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+ pd := realDecoder{raw: m.Value}
+ m.Set = &MessageSet{}
+ return m.Set.decode(&pd)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 0000000..f028784
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,89 @@
+package sarama
+
+type MessageBlock struct {
+ Offset int64
+ Msg *Message
+}
+
+// Messages is a convenience helper which returns either all the messages
+// wrapped in this block, or the block itself if it does not wrap a set.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+ if msb.Msg.Set != nil {
+ return msb.Msg.Set.Messages
+ }
+ return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+ pe.putInt64(msb.Offset)
+ pe.push(&lengthField{})
+ err := msb.Msg.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+ if msb.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if err = pd.push(&lengthField{}); err != nil {
+ return err
+ }
+
+ msb.Msg = new(Message)
+ if err = msb.Msg.decode(pd); err != nil {
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type MessageSet struct {
+ PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+ Messages []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+ for i := range ms.Messages {
+ err := ms.Messages[i].encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+ ms.Messages = nil
+
+ for pd.remaining() > 0 {
+ msb := new(MessageBlock)
+ err = msb.decode(pd)
+ switch err {
+ case nil:
+ ms.Messages = append(ms.Messages, msb)
+ case ErrInsufficientData:
+ // As an optimization the server is allowed to return a partial message at the
+ // end of the message set. Clients should handle this case. So we just ignore such things.
+ ms.PartialTrailingMessage = true
+ return nil
+ default:
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+ block := new(MessageBlock)
+ block.Msg = msg
+ ms.Messages = append(ms.Messages, block)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 0000000..130cfd4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,48 @@
+package sarama
+
+type MetadataRequest struct {
+ Topics []string
+}
+
+func (mr *MetadataRequest) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(mr.Topics))
+ if err != nil {
+ return err
+ }
+
+ for i := range mr.Topics {
+ err = pe.putString(mr.Topics[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (mr *MetadataRequest) decode(pd packetDecoder) error {
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+
+ mr.Topics = make([]string, topicCount)
+ for i := range mr.Topics {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ mr.Topics[i] = topic
+ }
+ return nil
+}
+
+func (mr *MetadataRequest) key() int16 {
+ return 3
+}
+
+func (mr *MetadataRequest) version() int16 {
+ return 0
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 0000000..b82221f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,227 @@
+package sarama
+
+type PartitionMetadata struct {
+ Err KError
+ ID int32
+ Leader int32
+ Replicas []int32
+ Isr []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ pm.Err = KError(tmp)
+
+ pm.ID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Leader, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Replicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ pm.Isr, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(pm.Err))
+ pe.putInt32(pm.ID)
+ pe.putInt32(pm.Leader)
+
+ err = pe.putInt32Array(pm.Replicas)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putInt32Array(pm.Isr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type TopicMetadata struct {
+ Err KError
+ Name string
+ Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ tm.Err = KError(tmp)
+
+ tm.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ tm.Partitions = make([]*PartitionMetadata, n)
+ for i := 0; i < n; i++ {
+ tm.Partitions[i] = new(PartitionMetadata)
+ err = tm.Partitions[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(tm.Err))
+
+ err = pe.putString(tm.Name)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(tm.Partitions))
+ if err != nil {
+ return err
+ }
+
+ for _, pm := range tm.Partitions {
+ err = pm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type MetadataResponse struct {
+ Brokers []*Broker
+ Topics []*TopicMetadata
+}
+
+func (m *MetadataResponse) decode(pd packetDecoder) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ m.Brokers = make([]*Broker, n)
+ for i := 0; i < n; i++ {
+ m.Brokers[i] = new(Broker)
+ err = m.Brokers[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ n, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ m.Topics = make([]*TopicMetadata, n)
+ for i := 0; i < n; i++ {
+ m.Topics[i] = new(TopicMetadata)
+ err = m.Topics[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *MetadataResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(m.Brokers))
+ if err != nil {
+ return err
+ }
+ for _, broker := range m.Brokers {
+ err = broker.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = pe.putArrayLength(len(m.Topics))
+ if err != nil {
+ return err
+ }
+ for _, tm := range m.Topics {
+ err = tm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// testing API
+
+func (m *MetadataResponse) AddBroker(addr string, id int32) {
+ m.Brokers = append(m.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (m *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+ var tmatch *TopicMetadata
+
+ for _, tm := range m.Topics {
+ if tm.Name == topic {
+ tmatch = tm
+ goto foundTopic
+ }
+ }
+
+ tmatch = new(TopicMetadata)
+ tmatch.Name = topic
+ m.Topics = append(m.Topics, tmatch)
+
+foundTopic:
+
+ tmatch.Err = err
+ return tmatch
+}
+
+func (m *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
+ tmatch := m.AddTopic(topic, ErrNoError)
+ var pmatch *PartitionMetadata
+
+ for _, pm := range tmatch.Partitions {
+ if pm.ID == partition {
+ pmatch = pm
+ goto foundPartition
+ }
+ }
+
+ pmatch = new(PartitionMetadata)
+ pmatch.ID = partition
+ tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+ pmatch.Leader = brokerID
+ pmatch.Replicas = replicas
+ pmatch.Isr = isr
+ pmatch.Err = err
+
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md
new file mode 100644
index 0000000..55a6c2e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md
@@ -0,0 +1,47 @@
+# sarama/mocks
+
+The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types.
+You can use them to test your sarama applications using dependency injection.
+
+The following mock objects are available:
+
+- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
+- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
+- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
+
+The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
+and the results will be reported to the `*testing.T` object you provided when creating the mock.
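+
+For example, here is a minimal sketch of a test that uses the mock `Consumer`.
+The topic name and message value are made up for illustration; in a real test,
+the message handling shown inline would live in the code under test:
+
+```go
+package example_test
+
+import (
+	"testing"
+
+	"github.com/Shopify/sarama"
+	"github.com/Shopify/sarama/mocks"
+)
+
+func TestConsume(t *testing.T) {
+	consumer := mocks.NewConsumer(t, nil)
+	pc := consumer.ExpectConsumePartition("my_topic", 0, sarama.OffsetOldest)
+	pc.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
+
+	partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if msg := <-partitionConsumer.Messages(); string(msg.Value) != "hello" {
+		t.Errorf("unexpected message: %s", msg.Value)
+	}
+
+	// Closing the mock verifies the expectations that were set on it.
+	if err := consumer.Close(); err != nil {
+		t.Error(err)
+	}
+}
+```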
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go
new file mode 100644
index 0000000..6ccf1f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go
@@ -0,0 +1,142 @@
+package mocks
+
+import (
+ "sync"
+
+ "github.com/Shopify/sarama"
+)
+
+// AsyncProducer implements sarama's Producer interface for testing purposes.
+// Before you can send messages to its Input channel, you have to set expectations
+// so it knows how to handle the input. This way you can easily test success and
+// failure scenarios.
+type AsyncProducer struct {
+ l sync.Mutex
+ t ErrorReporter
+ expectations []*producerExpectation
+ closed chan struct{}
+ input chan *sarama.ProducerMessage
+ successes chan *sarama.ProducerMessage
+ errors chan *sarama.ProducerError
+ lastOffset int64
+}
+
+// NewAsyncProducer instantiates a new Producer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is used to determine whether it
+// should ack successes on the Successes channel.
+func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
+ if config == nil {
+ config = sarama.NewConfig()
+ }
+ mp := &AsyncProducer{
+ t: t,
+ closed: make(chan struct{}, 0),
+ expectations: make([]*producerExpectation, 0),
+ input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+ successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
+ errors: make(chan *sarama.ProducerError, config.ChannelBufferSize),
+ }
+
+ go func() {
+ defer func() {
+ close(mp.successes)
+ close(mp.errors)
+ }()
+
+ for msg := range mp.input {
+ mp.l.Lock()
+ if mp.expectations == nil || len(mp.expectations) == 0 {
+ mp.expectations = nil
+ mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+ } else {
+ expectation := mp.expectations[0]
+ mp.expectations = mp.expectations[1:]
+ if expectation.Result == errProduceSuccess {
+ mp.lastOffset++
+ if config.Producer.Return.Successes {
+ msg.Offset = mp.lastOffset
+ mp.successes <- msg
+ }
+ } else {
+ if config.Producer.Return.Errors {
+ mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
+ }
+ }
+ }
+ mp.l.Unlock()
+ }
+
+ mp.l.Lock()
+ if len(mp.expectations) > 0 {
+ mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
+ }
+ mp.l.Unlock()
+
+ close(mp.closed)
+ }()
+
+ return mp
+}
+
+////////////////////////////////////////////////
+// Implement Producer interface
+////////////////////////////////////////////////
+
+// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) AsyncClose() {
+ close(mp.input)
+}
+
+// Close corresponds with the Close method of sarama's Producer implementation.
+// By closing a mock producer, you also tell it that no more input will be provided, so it will
+// write an error to the test state if there are any remaining expectations.
+func (mp *AsyncProducer) Close() error {
+ mp.AsyncClose()
+ <-mp.closed
+ return nil
+}
+
+// Input corresponds with the Input method of sarama's Producer implementation.
+// You have to set expectations on the mock producer before writing messages to the Input
+// channel, so it knows how to handle them. If there are no remaining expectations and
+// a message is written to the Input channel, the mock producer will write an error to the test
+// state object.
+func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
+ return mp.input
+}
+
+// Successes corresponds with the Successes method of sarama's Producer implementation.
+func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
+ return mp.successes
+}
+
+// Errors corresponds with the Errors method of sarama's Producer implementation.
+func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
+ return mp.errors
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it were produced successfully,
+// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
+// is set to true.
+func (mp *AsyncProducer) ExpectInputAndSucceed() {
+ mp.l.Lock()
+ defer mp.l.Unlock()
+ mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess})
+}
+
+// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
+// on the input channel. The mock producer will handle the message as if it failed to produce
+// successfully. This means it will make a ProducerError available on the Errors channel.
+func (mp *AsyncProducer) ExpectInputAndFail(err error) {
+ mp.l.Lock()
+ defer mp.l.Unlock()
+ mp.expectations = append(mp.expectations, &producerExpectation{Result: err})
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go
new file mode 100644
index 0000000..a2c394e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go
@@ -0,0 +1,300 @@
+package mocks
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "github.com/Shopify/sarama"
+)
+
+// Consumer implements sarama's Consumer interface for testing purposes.
+// Before you can start consuming from this consumer, you have to register
+// topic/partitions using ExpectConsumePartition, and set expectations on them.
+type Consumer struct {
+ l sync.Mutex
+ t ErrorReporter
+ config *sarama.Config
+ partitionConsumers map[string]map[int32]*PartitionConsumer
+ metadata map[string][]int32
+}
+
+// NewConsumer returns a new mock Consumer instance. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument can be set to nil; it is used to size the buffered channels of registered partition consumers.
+func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
+ if config == nil {
+ config = sarama.NewConfig()
+ }
+
+ c := &Consumer{
+ t: t,
+ config: config,
+ partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
+ }
+ return c
+}
+
+///////////////////////////////////////////////////
+// Consumer interface implementation
+///////////////////////////////////////////////////
+
+// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
+// Before you can start consuming a partition, you have to set expectations on it using
+// ExpectConsumePartition. You can only consume a partition once per consumer.
+func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
+ c.t.Errorf("No expectations set for %s/%d", topic, partition)
+ return nil, errOutOfExpectations
+ }
+
+ pc := c.partitionConsumers[topic][partition]
+ if pc.consumed {
+ return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
+ }
+
+ if pc.offset != AnyOffset && pc.offset != offset {
+ c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
+ }
+
+ pc.consumed = true
+ return pc, nil
+}
+
+// Topics returns a list of topics, as registered with SetTopicMetadata
+func (c *Consumer) Topics() ([]string, error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.metadata == nil {
+ c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
+ return nil, sarama.ErrOutOfBrokers
+ }
+
+ var result []string
+ for topic := range c.metadata {
+ result = append(result, topic)
+ }
+ return result, nil
+}
+
+// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata
+func (c *Consumer) Partitions(topic string) ([]int32, error) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.metadata == nil {
+ c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
+ return nil, sarama.ErrOutOfBrokers
+ }
+ if c.metadata[topic] == nil {
+ return nil, sarama.ErrUnknownTopicOrPartition
+ }
+
+ return c.metadata[topic], nil
+}
+
+// Close implements the Close method from the sarama.Consumer interface. It will close
+// all registered PartitionConsumer instances.
+func (c *Consumer) Close() error {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ for _, partitions := range c.partitionConsumers {
+ for _, partitionConsumer := range partitions {
+ _ = partitionConsumer.Close()
+ }
+ }
+
+ return nil
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// SetTopicMetadata sets the cluster's topic/partition metadata,
+// which will be returned by Topics() and Partitions().
+func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ c.metadata = metadata
+}
+
+// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
+// The registered PartitionConsumer will be returned, so you can set expectations
+// on it using method chaining. Once a topic/partition is registered, you are
+// expected to start consuming it using ConsumePartition. If that doesn't happen,
+// an error will be written to the error reporter once the mock consumer is closed. It will
+// also expect that the offset passed to ConsumePartition matches the offset registered here, unless AnyOffset was used.
+func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
+ c.l.Lock()
+ defer c.l.Unlock()
+
+ if c.partitionConsumers[topic] == nil {
+ c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
+ }
+
+ if c.partitionConsumers[topic][partition] == nil {
+ c.partitionConsumers[topic][partition] = &PartitionConsumer{
+ t: c.t,
+ topic: topic,
+ partition: partition,
+ offset: offset,
+ messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
+ errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
+ }
+ }
+
+ return c.partitionConsumers[topic][partition]
+}
+
+///////////////////////////////////////////////////
+// PartitionConsumer mock type
+///////////////////////////////////////////////////
+
+// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
+// It is returned by the mock Consumer's ConsumePartition method, but only if it is
+// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
+// Errors and Messages channels, you should specify what values will be provided on these
+// channels using YieldMessage and YieldError.
+type PartitionConsumer struct {
+ l sync.Mutex
+ t ErrorReporter
+ topic string
+ partition int32
+ offset int64
+ messages chan *sarama.ConsumerMessage
+ errors chan *sarama.ConsumerError
+ singleClose sync.Once
+ consumed bool
+ errorsShouldBeDrained bool
+ messagesShouldBeDrained bool
+ highWaterMarkOffset int64
+}
+
+///////////////////////////////////////////////////
+// PartitionConsumer interface implementation
+///////////////////////////////////////////////////
+
+// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) AsyncClose() {
+ pc.singleClose.Do(func() {
+ close(pc.messages)
+ close(pc.errors)
+ })
+}
+
+// Close implements the Close method from the sarama.PartitionConsumer interface. It will
+// verify whether the partition consumer was actually started.
+func (pc *PartitionConsumer) Close() error {
+ if !pc.consumed {
+ pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
+ return errPartitionConsumerNotStarted
+ }
+
+ if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
+ pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
+ }
+
+ if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
+ pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
+ }
+
+ pc.AsyncClose()
+
+ var (
+ closeErr error
+ wg sync.WaitGroup
+ )
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ var errs = make(sarama.ConsumerErrors, 0)
+ for err := range pc.errors {
+ errs = append(errs, err)
+ }
+
+ if len(errs) > 0 {
+ closeErr = errs
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for _ = range pc.messages {
+ // drain
+ }
+ }()
+
+ wg.Wait()
+ return closeErr
+}
+
+// Errors implements the Errors method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
+ return pc.errors
+}
+
+// Messages implements the Messages method from the sarama.PartitionConsumer interface.
+func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
+ return pc.messages
+}
+
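+// HighWaterMarkOffset implements the HighWaterMarkOffset method from the sarama.PartitionConsumer interface.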
+func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
+ return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
+}
+
+///////////////////////////////////////////////////
+// Expectation API
+///////////////////////////////////////////////////
+
+// YieldMessage will yield a message on the Messages channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this
+// message was consumed from the Messages channel, because there are legitimate
+// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
+// verify that the channel is empty on close.
+func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
+ pc.l.Lock()
+ defer pc.l.Unlock()
+
+ msg.Topic = pc.topic
+ msg.Partition = pc.partition
+ msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
+
+ pc.messages <- msg
+}
+
+// YieldError will yield an error on the Errors channel of this partition consumer
+// when it is consumed. By default, the mock consumer will not verify whether this error was
+// consumed from the Errors channel, because there are legitimate reasons for this
+// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
+// the channel is empty on close.
+func (pc *PartitionConsumer) YieldError(err error) {
+ pc.errors <- &sarama.ConsumerError{
+ Topic: pc.topic,
+ Partition: pc.partition,
+ Err: err,
+ }
+}
+
+// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
+// that the messages channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
+func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
+ pc.messagesShouldBeDrained = true
+}
+
+// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
+// that the errors channel will be fully drained when Close is called. If this
+// expectation is not met, an error is reported to the error reporter.
+func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
+ pc.errorsShouldBeDrained = true
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go
new file mode 100644
index 0000000..96b79bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go
@@ -0,0 +1,43 @@
+/*
+Package mocks provides mocks that can be used for testing applications
+that use Sarama. The mock types provided by this package implement the
+interfaces Sarama exports, so you can use them for dependency injection
+in your tests.
+
+All mock instances require you to set expectations on them before you
+can use them; these expectations determine how the mock will behave. If an
+expectation is not met, it will make your test fail.
+
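+A typical test might look like the following sketch, where publishGreeting
+stands in for whatever code of your own is under test:
+
+	producer := mocks.NewSyncProducer(t, nil)
+	producer.ExpectSendMessageAndSucceed()
+	publishGreeting(producer) // hypothetical code under test; sends one message
+	producer.Close()          // reports any unmet expectations to t
+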
+NOTE: this package currently does not fall under the API stability
+guarantee of Sarama as it is still considered experimental.
+*/
+package mocks
+
+import (
+ "errors"
+
+ "github.com/Shopify/sarama"
+)
+
+// ErrorReporter is a simple interface that includes the testing.T methods we use to report
+// expectation violations when using the mock objects.
+type ErrorReporter interface {
+ Errorf(string, ...interface{})
+}
+
+var (
+ errProduceSuccess error = nil
+ errOutOfExpectations = errors.New("No more expectations set on mock")
+ errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
+)
+
+const AnyOffset int64 = -1000
+
+type producerExpectation struct {
+ Result error
+}
+
+type consumerExpectation struct {
+ Err error
+ Msg *sarama.ConsumerMessage
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go
new file mode 100644
index 0000000..fa86b24
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go
@@ -0,0 +1,94 @@
+package mocks
+
+import (
+ "sync"
+
+ "github.com/Shopify/sarama"
+)
+
+// SyncProducer implements sarama's SyncProducer interface for testing purposes.
+// Before you can use it, you have to set expectations on the mock SyncProducer
+// to tell it how to handle calls to SendMessage, so you can easily test success
+// and failure scenarios.
+type SyncProducer struct {
+ l sync.Mutex
+ t ErrorReporter
+ expectations []*producerExpectation
+ lastOffset int64
+}
+
+// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
+// be the *testing.T instance of your test method. An error will be written to it if
+// an expectation is violated. The config argument is currently unused, but is
+// maintained to be compatible with the async Producer.
+func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
+ return &SyncProducer{
+ t: t,
+ expectations: make([]*producerExpectation, 0),
+ }
+}
+
+////////////////////////////////////////////////
+// Implement SyncProducer interface
+////////////////////////////////////////////////
+
+// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
+// You have to set expectations on the mock producer before calling SendMessage, so it knows
+// how to handle them. If there are no remaining expectations when SendMessage is called,
+// the mock producer will write an error to the test state object.
+func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+
+ if len(sp.expectations) > 0 {
+ expectation := sp.expectations[0]
+ sp.expectations = sp.expectations[1:]
+
+ if expectation.Result == errProduceSuccess {
+ sp.lastOffset++
+ msg.Offset = sp.lastOffset
+ return 0, msg.Offset, nil
+ } else {
+ return -1, -1, expectation.Result
+ }
+ } else {
+ sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
+ return -1, -1, errOutOfExpectations
+ }
+}
+
+// Close corresponds with the Close method of sarama's SyncProducer implementation.
+// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,
+// so it will write an error to the test state if there are any remaining expectations.
+func (sp *SyncProducer) Close() error {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+
+ if len(sp.expectations) > 0 {
+ sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
+ }
+
+ return nil
+}
+
+////////////////////////////////////////////////
+// Setting expectations
+////////////////////////////////////////////////
+
+// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it were produced successfully, i.e. by
+// returning a valid partition and offset, and a nil error.
+func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+ sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess})
+}
+
+// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
+// called. The mock producer will handle the message as if it failed to produce
+// successfully, i.e. by returning the provided error.
+func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
+ sp.l.Lock()
+ defer sp.l.Unlock()
+ sp.expectations = append(sp.expectations, &producerExpectation{Result: err})
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 0000000..ba4ac76
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,176 @@
+package sarama
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
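+//
+// For example (a sketch; topic, partition, and offset are made up), on a Version 1
+// request: request.AddBlock("my_topic", 0, committedOffset, ReceiveTime, "")
+// lets the broker assign the commit timestamp.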
+const ReceiveTime int64 = -1
+
+type offsetCommitRequestBlock struct {
+ offset int64
+ timestamp int64
+ metadata string
+}
+
+func (r *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(r.offset)
+ if version == 1 {
+ pe.putInt64(r.timestamp)
+ } else if r.timestamp != 0 {
+ Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+ }
+
+ return pe.putString(r.metadata)
+}
+
+func (r *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if r.offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 1 {
+ if r.timestamp, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+ r.metadata, err = pd.getString()
+ return err
+}
+
+type OffsetCommitRequest struct {
+ ConsumerGroup string
+ ConsumerGroupGeneration int32 // v1 or later
+ ConsumerID string // v1 or later
+ RetentionTime int64 // v2 or later
+
+ // Version can be:
+ // - 0 (kafka 0.8.1 and later)
+ // - 1 (kafka 0.8.2 and later)
+ // - 2 (kafka 0.8.3 and later)
+ Version int16
+ blocks map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+ if r.Version < 0 || r.Version > 2 {
+ return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+ }
+
+ if err := pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ConsumerGroupGeneration)
+ if err := pe.putString(r.ConsumerID); err != nil {
+ return err
+ }
+ } else {
+ if r.ConsumerGroupGeneration != 0 {
+ Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ if r.ConsumerID != "" {
+ Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ }
+
+ if r.Version >= 2 {
+ pe.putInt64(r.RetentionTime)
+ } else if r.RetentionTime != 0 {
+ Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+ }
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder) (err error) {
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.ConsumerID, err = pd.getString(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ if r.RetentionTime, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetCommitRequestBlock{}
+ if err := block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ }
+
+ r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 0000000..573a3b6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,73 @@
+package sarama
+
+type OffsetCommitResponse struct {
+ Errors map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]KError)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ r.Errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, kerror := range partitions {
+ pe.putInt32(partition)
+ pe.putInt16(int16(kerror))
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Errors = make(map[string]map[int32]KError, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numErrors, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name] = make(map[int32]KError, numErrors)
+
+ for j := 0; j < numErrors; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Errors[name][id] = KError(tmp)
+ }
+ }
+
+ return nil
+}
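After a commit, callers typically walk the per-partition error map. A short sketch, assuming resp is an *OffsetCommitResponse returned by a broker:

    for topic, partitions := range resp.Errors {
        for partition, kerr := range partitions {
            if kerr != sarama.ErrNoError {
                sarama.Logger.Printf("commit failed for %s/%d: %v", topic, partition, kerr)
            }
        }
    }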
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..30bbbbb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,71 @@
+package sarama
+
+type OffsetFetchRequest struct {
+ ConsumerGroup string
+ Version int16
+ partitions map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+ if r.Version < 0 || r.Version > 1 {
+ return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+ }
+
+ if err = pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(r.partitions)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.partitions {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder) (err error) {
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if partitionCount == 0 {
+ return nil
+ }
+ r.partitions = make(map[string][]int32)
+ for i := 0; i < partitionCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ r.partitions[topic] = partitions
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+ if r.partitions == nil {
+ r.partitions = make(map[string][]int32)
+ }
+
+ r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..93078c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,131 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+ Offset int64
+ Metadata string
+ Err KError
+}
+
+func (r *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
+ r.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ r.Metadata, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(tmp)
+
+ return nil
+}
+
+func (r *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt64(r.Offset)
+
+ err = pe.putString(r.Metadata)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt16(int16(r.Err))
+
+ return nil
+}
+
+type OffsetFetchResponse struct {
+ Blocks map[string]map[int32]*OffsetFetchResponseBlock
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numBlocks == 0 {
+ r.Blocks[name] = nil
+ continue
+ }
+ r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetFetchResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ partitions := r.Blocks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ partitions[partition] = block
+}
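Putting the fetch request and response together, a hedged sketch of reading a committed offset straight from a coordinator. It assumes broker is an already-connected *sarama.Broker; Version 1 reads Kafka-stored offsets, while the v0 default reads the older ZooKeeper-backed store:

    func printCommittedOffset(broker *sarama.Broker) error {
        req := &sarama.OffsetFetchRequest{ConsumerGroup: "my-group", Version: 1}
        req.AddPartition("my-topic", 0)
        resp, err := broker.FetchOffset(req)
        if err != nil {
            return err
        }
        if block := resp.GetBlock("my-topic", 0); block != nil && block.Err == sarama.ErrNoError {
            fmt.Printf("offset=%d metadata=%q\n", block.Offset, block.Metadata)
        }
        return nil
    }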
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_manager.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 0000000..880d495
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,515 @@
+package sarama
+
+import (
+ "sync"
+ "time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+ // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+ // It will return an error if this OffsetManager is already managing the given
+ // topic/partition.
+ ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+ // Close stops the OffsetManager from managing offsets. It is required to call
+ // this function before an OffsetManager object passes out of scope, as it
+ // will otherwise leak memory. You must call this after all the
+ // PartitionOffsetManagers are closed.
+ Close() error
+}
+
+type offsetManager struct {
+ client Client
+ conf *Config
+ group string
+
+ lock sync.Mutex
+ poms map[string]map[int32]*partitionOffsetManager
+ boms map[*Broker]*brokerOffsetManager
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the offset manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ om := &offsetManager{
+ client: client,
+ conf: client.Config(),
+ group: group,
+ poms: make(map[string]map[int32]*partitionOffsetManager),
+ boms: make(map[*Broker]*brokerOffsetManager),
+ }
+
+ return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+ pom, err := om.newPartitionOffsetManager(topic, partition)
+ if err != nil {
+ return nil, err
+ }
+
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ topicManagers := om.poms[topic]
+ if topicManagers == nil {
+ topicManagers = make(map[int32]*partitionOffsetManager)
+ om.poms[topic] = topicManagers
+ }
+
+ if topicManagers[partition] != nil {
+ return nil, ConfigurationError("That topic/partition is already being managed")
+ }
+
+ topicManagers[partition] = pom
+ return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+ return nil
+}
+
+func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom := om.boms[broker]
+ if bom == nil {
+ bom = om.newBrokerOffsetManager(broker)
+ om.boms[broker] = bom
+ }
+
+ bom.refs++
+
+ return bom
+}
+
+func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom.refs--
+
+ if bom.refs == 0 {
+ close(bom.updateSubscriptions)
+ if om.boms[bom.broker] == bom {
+ delete(om.boms, bom.broker)
+ }
+ }
+}
+
+func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.boms, bom.broker)
+}
+
+func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.poms[pom.topic], pom.partition)
+ if len(om.poms[pom.topic]) == 0 {
+ delete(om.poms, pom.topic)
+ }
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+ // NextOffset returns the next offset that should be consumed for the managed
+ // partition, accompanied by metadata which can be used to reconstruct the state
+ // of the partition consumer when it resumes. NextOffset() will return
+ // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+ // was committed for this partition yet.
+ NextOffset() (int64, string)
+
+ // MarkOffset marks the provided offset as processed, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(offset int64, metadata string)
+
+ // Errors returns a read channel of errors that occur during offset management, if
+ // enabled. By default, errors are logged and not returned over this channel. If
+ // you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+ // return immediately, after which you should wait until the 'errors' channel has
+	// been drained and closed. It is required to call this function (or Close)
+	// before a PartitionOffsetManager passes out of scope, as it will otherwise
+	// leak memory. You must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionOffsetManager from managing offsets. It is required to
+ // call this function (or AsyncClose) before a PartitionOffsetManager object
+ // passes out of scope, as it will otherwise leak memory. You must call this
+ // before calling Close on the underlying client.
+ Close() error
+}
+
+type partitionOffsetManager struct {
+ parent *offsetManager
+ topic string
+ partition int32
+
+ lock sync.Mutex
+ offset int64
+ metadata string
+ dirty bool
+ clean chan none
+ broker *brokerOffsetManager
+
+ errors chan *ConsumerError
+ rebalance chan none
+ dying chan none
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+ pom := &partitionOffsetManager{
+ parent: om,
+ topic: topic,
+ partition: partition,
+ clean: make(chan none),
+ errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
+ rebalance: make(chan none, 1),
+ dying: make(chan none),
+ }
+
+ if err := pom.selectBroker(); err != nil {
+ return nil, err
+ }
+
+ if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
+ return nil, err
+ }
+
+ pom.broker.updateSubscriptions <- pom
+
+ go withRecover(pom.mainLoop)
+
+ return pom, nil
+}
+
+func (pom *partitionOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-pom.rebalance:
+ if err := pom.selectBroker(); err != nil {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ } else {
+ pom.broker.updateSubscriptions <- pom
+ }
+ case <-pom.dying:
+ if pom.broker != nil {
+ select {
+ case <-pom.rebalance:
+ case pom.broker.updateSubscriptions <- pom:
+ }
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ }
+ pom.parent.abandonPartitionOffsetManager(pom)
+ close(pom.errors)
+ return
+ }
+ }
+}
+
+func (pom *partitionOffsetManager) selectBroker() error {
+ if pom.broker != nil {
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ pom.broker = nil
+ }
+
+ var broker *Broker
+ var err error
+
+ if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ pom.broker = pom.parent.refBrokerOffsetManager(broker)
+ return nil
+}
+
+func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
+ request := new(OffsetFetchRequest)
+ request.Version = 1
+ request.ConsumerGroup = pom.parent.group
+ request.AddPartition(pom.topic, pom.partition)
+
+ response, err := pom.broker.broker.FetchOffset(request)
+ if err != nil {
+ return err
+ }
+
+ block := response.GetBlock(pom.topic, pom.partition)
+ if block == nil {
+ return ErrIncompleteResponse
+ }
+
+ switch block.Err {
+ case ErrNoError:
+ pom.offset = block.Offset
+ pom.metadata = block.Metadata
+ return nil
+ case ErrNotCoordinatorForConsumer:
+ if retries <= 0 {
+ return block.Err
+ }
+ if err := pom.selectBroker(); err != nil {
+ return err
+ }
+ return pom.fetchInitialOffset(retries - 1)
+ case ErrOffsetsLoadInProgress:
+ if retries <= 0 {
+ return block.Err
+ }
+ time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
+ return pom.fetchInitialOffset(retries - 1)
+ default:
+ return block.Err
+ }
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+ cErr := &ConsumerError{
+ Topic: pom.topic,
+ Partition: pom.partition,
+ Err: err,
+ }
+
+ if pom.parent.conf.Consumer.Return.Errors {
+ pom.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+ return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset > pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset == offset && pom.metadata == metadata {
+ pom.dirty = false
+
+ select {
+ case pom.clean <- none{}:
+ default:
+ }
+ }
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset >= 0 {
+ return pom.offset + 1, pom.metadata
+ }
+
+ return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+ go func() {
+ pom.lock.Lock()
+ dirty := pom.dirty
+ pom.lock.Unlock()
+
+ if dirty {
+ <-pom.clean
+ }
+
+ close(pom.dying)
+ }()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+ pom.AsyncClose()
+
+ var errors ConsumerErrors
+ for err := range pom.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+// Broker Offset Manager
+
+type brokerOffsetManager struct {
+ parent *offsetManager
+ broker *Broker
+ timer *time.Ticker
+ updateSubscriptions chan *partitionOffsetManager
+ subscriptions map[*partitionOffsetManager]none
+ refs int
+}
+
+func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ bom := &brokerOffsetManager{
+ parent: om,
+ broker: broker,
+ timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
+ updateSubscriptions: make(chan *partitionOffsetManager),
+ subscriptions: make(map[*partitionOffsetManager]none),
+ }
+
+ go withRecover(bom.mainLoop)
+
+ return bom
+}
+
+func (bom *brokerOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-bom.timer.C:
+ if len(bom.subscriptions) > 0 {
+ bom.flushToBroker()
+ }
+ case s, ok := <-bom.updateSubscriptions:
+ if !ok {
+ bom.timer.Stop()
+ return
+ }
+ if _, ok := bom.subscriptions[s]; ok {
+ delete(bom.subscriptions, s)
+ } else {
+ bom.subscriptions[s] = none{}
+ }
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) flushToBroker() {
+ request := bom.constructRequest()
+ if request == nil {
+ return
+ }
+
+ response, err := bom.broker.CommitOffset(request)
+
+ if err != nil {
+ bom.abort(err)
+ return
+ }
+
+ for s := range bom.subscriptions {
+ if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
+ continue
+ }
+
+ var err KError
+ var ok bool
+
+ if response.Errors[s.topic] == nil {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+ if err, ok = response.Errors[s.topic][s.partition]; !ok {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+
+ switch err {
+ case ErrNoError:
+ block := request.blocks[s.topic][s.partition]
+ s.updateCommitted(block.offset, block.metadata)
+ case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable:
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ default:
+ s.handleError(err)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
+ r := &OffsetCommitRequest{
+ Version: 1,
+ ConsumerGroup: bom.parent.group,
+ }
+
+ for s := range bom.subscriptions {
+ s.lock.Lock()
+ if s.dirty {
+ r.AddBlock(s.topic, s.partition, s.offset, ReceiveTime, s.metadata)
+ }
+ s.lock.Unlock()
+ }
+
+ if len(r.blocks) > 0 {
+ return r
+ }
+
+ return nil
+}
+
+func (bom *brokerOffsetManager) abort(err error) {
+ _ = bom.broker.Close() // we don't care about the error this might return, we already have one
+ bom.parent.abandonBroker(bom)
+
+ for pom := range bom.subscriptions {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ }
+
+ for s := range bom.updateSubscriptions {
+ if _, ok := bom.subscriptions[s]; !ok {
+ s.handleError(err)
+ s.rebalance <- none{}
+ }
+ }
+
+ bom.subscriptions = make(map[*partitionOffsetManager]none)
+}
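An end-to-end sketch of the manager as the interface comments above describe it. The broker address, group, and topic are placeholders, and error handling is trimmed to the essentials:

    client, err := sarama.NewClient([]string{"localhost:9092"}, nil) // nil means default config
    if err != nil {
        panic(err)
    }
    om, _ := sarama.NewOffsetManagerFromClient("my-group", client)
    pom, _ := om.ManagePartition("my-topic", 0)

    offset, metadata := pom.NextOffset() // where to resume, plus any saved consumer state
    _ = metadata
    // ... consume from `offset`, then record progress:
    pom.MarkOffset(offset, "state-blob")

    // Tear down in the documented order: partition managers first, then the
    // manager, then the underlying client.
    _ = pom.Close()
    _ = om.Close()
    _ = client.Close()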
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 0000000..842d5c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,113 @@
+package sarama
+
+type offsetRequestBlock struct {
+ time int64
+ maxOffsets int32
+}
+
+func (r *offsetRequestBlock) encode(pe packetEncoder) error {
+	pe.putInt64(r.time)
+ pe.putInt32(r.maxOffsets)
+ return nil
+}
+
+func (r *offsetRequestBlock) decode(pd packetDecoder) (err error) {
+ if r.time, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if r.maxOffsets, err = pd.getInt32(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type OffsetRequest struct {
+ blocks map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ err := pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder) error {
+ // Ignore replica ID
+ if _, err := pd.getInt32(); err != nil {
+ return err
+ }
+ blockCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if blockCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ for i := 0; i < blockCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetRequestBlock{}
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+ return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+ return 0
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ }
+
+ tmp := new(offsetRequestBlock)
+ tmp.time = time
+ tmp.maxOffsets = maxOffsets
+
+ r.blocks[topic][partitionID] = tmp
+}
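For clients, the interesting inputs are the two special time values. A sketch (topic and partition are placeholders; OffsetOldest and OffsetNewest are the sarama constants for -2 and -1):

    req := &sarama.OffsetRequest{}
    req.AddBlock("my-topic", 0, sarama.OffsetOldest, 1) // maxOffsets of 1: ask for a single offset
    // sarama.OffsetNewest would instead request the offset of the next message to be produced.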
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 0000000..07d71ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,130 @@
+package sarama
+
+type OffsetResponseBlock struct {
+ Err KError
+ Offsets []int64
+}
+
+func (r *OffsetResponseBlock) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(tmp)
+
+ r.Offsets, err = pd.getInt64Array()
+
+ return err
+}
+
+func (r *OffsetResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(r.Err))
+
+ return pe.putInt64Array(r.Offsets)
+}
+
+type OffsetResponse struct {
+ Blocks map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+ if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*OffsetResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}}
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 0000000..28670c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,45 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+ // Primitives
+ getInt8() (int8, error)
+ getInt16() (int16, error)
+ getInt32() (int32, error)
+ getInt64() (int64, error)
+ getArrayLength() (int, error)
+
+ // Collections
+ getBytes() ([]byte, error)
+ getString() (string, error)
+ getInt32Array() ([]int32, error)
+ getInt64Array() ([]int64, error)
+ getStringArray() ([]string, error)
+
+ // Subsets
+ remaining() int
+ getSubset(length int) (packetDecoder, error)
+
+ // Stacks, see PushDecoder
+ push(in pushDecoder) error
+ pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+ // Saves the offset into the input buffer as the location to actually read the calculated value when able.
+ saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and check the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+ // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+ check(curOffset int, buf []byte) error
+}
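As a sketch of the push/pop contract, here is how a CRC field can satisfy pushDecoder. This mirrors the library's crc32Field, which lives in a file outside this hunk; the type name is illustrative, and it assumes encoding/binary and hash/crc32 are imported:

    type crcCheckField struct {
        startOffset int
    }

    func (c *crcCheckField) saveOffset(in int)  { c.startOffset = in }
    func (c *crcCheckField) reserveLength() int { return 4 }

    func (c *crcCheckField) check(curOffset int, buf []byte) error {
        // Compare the stored checksum against a CRC of everything decoded
        // between the reserved 4 bytes and the current offset.
        stored := binary.BigEndian.Uint32(buf[c.startOffset:])
        if crc32.ChecksumIEEE(buf[c.startOffset+4:curOffset]) != stored {
            return PacketDecodingError{"CRC didn't match"}
        }
        return nil
    }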
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 0000000..0df6e24
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,42 @@
+package sarama
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+ // Primitives
+ putInt8(in int8)
+ putInt16(in int16)
+ putInt32(in int32)
+ putInt64(in int64)
+ putArrayLength(in int) error
+
+ // Collections
+ putBytes(in []byte) error
+ putRawBytes(in []byte) error
+ putString(in string) error
+ putStringArray(in []string) error
+ putInt32Array(in []int32) error
+ putInt64Array(in []int64) error
+
+ // Stacks, see PushEncoder
+ push(in pushEncoder)
+ pop() error
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+ // Saves the offset into the input buffer as the location to actually write the calculated value when able.
+ saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and write the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+ // of data to the saved offset, based on the data between the saved offset and curOffset.
+ run(curOffset int, buf []byte) error
+}
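The encode side can be sketched the same way. This is essentially what the lengthField pushed in produce_request.go and request.go later in this diff does; the type name here is illustrative, and encoding/binary is assumed imported:

    type length32Field struct {
        startOffset int
    }

    func (l *length32Field) saveOffset(in int)  { l.startOffset = in }
    func (l *length32Field) reserveLength() int { return 4 }

    func (l *length32Field) run(curOffset int, buf []byte) error {
        // Everything written after the reserved 4 bytes is the payload whose
        // size is now known; back-fill the length into the reserved slot.
        binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
        return nil
    }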
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 0000000..8c2bca4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,123 @@
+package sarama
+
+import (
+ "hash"
+ "hash/fnv"
+ "math/rand"
+ "time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+ // Partition takes a message and partition count and chooses a partition
+ Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+ // RequiresConsistency indicates to the user of the partitioner whether the
+ // mapping of key->partition is consistent or not. Specifically, if a
+ // partitioner requires consistency then it must be allowed to choose from all
+ // partitions (even ones known to be unavailable), and its choice must be
+ // respected by the caller. The obvious example is the HashPartitioner.
+ RequiresConsistency() bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+ return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+type randomPartitioner struct {
+ generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+ p := new(randomPartitioner)
+ p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+ return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type roundRobinPartitioner struct {
+ partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+ return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if p.partition >= numPartitions {
+ p.partition = 0
+ }
+ ret := p.partition
+ p.partition++
+ return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type hashPartitioner struct {
+ random Partitioner
+ hasher hash.Hash32
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil, or fails to
+// encode, then a random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key
+// is used, modulo the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if message.Key == nil {
+ return p.random.Partition(message, numPartitions)
+ }
+ bytes, err := message.Key.Encode()
+ if err != nil {
+ return -1, err
+ }
+ p.hasher.Reset()
+ _, err = p.hasher.Write(bytes)
+ if err != nil {
+ return -1, err
+ }
+ hash := int32(p.hasher.Sum32())
+ if hash < 0 {
+ hash = -hash
+ }
+ return hash % numPartitions, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+ return true
+}
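Implementing the interface is small. A hypothetical partitioner that pins every message to partition 0, trading throughput for a global ordering:

    type constantPartitioner struct{}

    func NewConstantPartitioner(topic string) sarama.Partitioner {
        return &constantPartitioner{}
    }

    func (p *constantPartitioner) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
        return 0, nil
    }

    // The message->partition mapping must always be honored, so consistency is required.
    func (p *constantPartitioner) RequiresConsistency() bool { return true }

It would be wired in through the producer config, e.g. config.Producer.Partitioner = NewConstantPartitioner, since that field takes a PartitionerConstructor.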
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 0000000..8c6ba85
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,110 @@
+package sarama
+
+import (
+ "fmt"
+ "math"
+)
+
+type prepEncoder struct {
+ length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+ pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+ pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+ pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+ pe.length += 8
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+ if in > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+ }
+ pe.length += 4
+ return nil
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+ pe.length += 4
+ if in == nil {
+ return nil
+ }
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putString(in string) error {
+ pe.length += 2
+ if len(in) > math.MaxInt16 {
+ return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, str := range in {
+ if err := pe.putString(str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 8 * len(in)
+ return nil
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+ pe.length += in.reserveLength()
+}
+
+func (pe *prepEncoder) pop() error {
+ return nil
+}
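prepEncoder is the measuring half of a two-pass scheme: run the same encode logic once against a prepEncoder to learn the exact size, then once against a realEncoder (which appears later in this diff) to fill a buffer of that size. A sketch of the driver, modelled on the library's unexported encode helper and assuming the package's encoder interface (a single encode(packetEncoder) error method); the MaxRequestSize check the real helper also performs is omitted:

    func encodeToBytes(e encoder) ([]byte, error) {
        var prep prepEncoder
        if err := e.encode(&prep); err != nil { // pass 1: just count bytes
            return nil, err
        }
        re := realEncoder{raw: make([]byte, prep.length)}
        if err := e.encode(&re); err != nil { // pass 2: write into the exact-size buffer
            return nil, err
        }
        return re.raw, nil
    }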
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 0000000..473513c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,145 @@
+package sarama
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.isr` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response; the TCP ACK is all you get.
+ NoResponse RequiredAcks = 0
+ // WaitForLocal waits for only the local commit to succeed before responding.
+ WaitForLocal RequiredAcks = 1
+ // WaitForAll waits for all replicas to commit before responding.
+ WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+ RequiredAcks RequiredAcks
+ Timeout int32
+ msgSets map[string]map[int32]*MessageSet
+}
+
+func (p *ProduceRequest) encode(pe packetEncoder) error {
+ pe.putInt16(int16(p.RequiredAcks))
+ pe.putInt32(p.Timeout)
+ err := pe.putArrayLength(len(p.msgSets))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range p.msgSets {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for id, msgSet := range partitions {
+ pe.putInt32(id)
+ pe.push(&lengthField{})
+ err = msgSet.encode(pe)
+ if err != nil {
+ return err
+ }
+ err = pe.pop()
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (p *ProduceRequest) decode(pd packetDecoder) error {
+ requiredAcks, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ p.RequiredAcks = RequiredAcks(requiredAcks)
+ if p.Timeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ p.msgSets = make(map[string]map[int32]*MessageSet)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ p.msgSets[topic] = make(map[int32]*MessageSet)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ messageSetSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ msgSetDecoder, err := pd.getSubset(int(messageSetSize))
+ if err != nil {
+ return err
+ }
+ msgSet := &MessageSet{}
+ err = msgSet.decode(msgSetDecoder)
+ if err != nil {
+ return err
+ }
+ p.msgSets[topic][partition] = msgSet
+ }
+ }
+ return nil
+}
+
+func (p *ProduceRequest) key() int16 {
+ return 0
+}
+
+func (p *ProduceRequest) version() int16 {
+ return 0
+}
+
+func (p *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+ if p.msgSets == nil {
+ p.msgSets = make(map[string]map[int32]*MessageSet)
+ }
+
+ if p.msgSets[topic] == nil {
+ p.msgSets[topic] = make(map[int32]*MessageSet)
+ }
+
+ set := p.msgSets[topic][partition]
+
+ if set == nil {
+ set = new(MessageSet)
+ p.msgSets[topic][partition] = set
+ }
+
+ set.addMessage(msg)
+}
+
+func (p *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+ if p.msgSets == nil {
+ p.msgSets = make(map[string]map[int32]*MessageSet)
+ }
+
+ if p.msgSets[topic] == nil {
+ p.msgSets[topic] = make(map[int32]*MessageSet)
+ }
+
+ p.msgSets[topic][partition] = set
+}
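A construction sketch (topic, partition, and payload are placeholders; Timeout is the broker-side wait in milliseconds):

    req := &sarama.ProduceRequest{
        RequiredAcks: sarama.WaitForLocal, // ack once the leader has committed
        Timeout:      10000,               // how long the broker may wait for acks, in ms
    }
    req.AddMessage("my-topic", 0, &sarama.Message{
        Codec: sarama.CompressionNone,
        Value: []byte("hello"),
    })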
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 0000000..1f49a85
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,112 @@
+package sarama
+
+type ProduceResponseBlock struct {
+ Err KError
+ Offset int64
+}
+
+func (pr *ProduceResponseBlock) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ pr.Err = KError(tmp)
+
+ pr.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type ProduceResponse struct {
+ Blocks map[string]map[int32]*ProduceResponseBlock
+}
+
+func (pr *ProduceResponse) decode(pd packetDecoder) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ pr.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(ProduceResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ pr.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (pr *ProduceResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(pr.Blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range pr.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for id, prb := range partitions {
+ pe.putInt32(id)
+ pe.putInt16(int16(prb.Err))
+ pe.putInt64(prb.Offset)
+ }
+ }
+ return nil
+}
+
+func (pr *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+ if pr.Blocks == nil {
+ return nil
+ }
+
+ if pr.Blocks[topic] == nil {
+ return nil
+ }
+
+ return pr.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (pr *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+ if pr.Blocks == nil {
+ pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+ }
+ byTopic, ok := pr.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*ProduceResponseBlock)
+ pr.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &ProduceResponseBlock{Err: err}
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_set.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 0000000..9fe5f79
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,158 @@
+package sarama
+
+import "time"
+
+type partitionSet struct {
+ msgs []*ProducerMessage
+ setToSend *MessageSet
+ bufferBytes int
+}
+
+type produceSet struct {
+ parent *asyncProducer
+ msgs map[string]map[int32]*partitionSet
+
+ bufferBytes int
+ bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+ return &produceSet{
+ msgs: make(map[string]map[int32]*partitionSet),
+ parent: parent,
+ }
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+ var err error
+ var key, val []byte
+
+ if msg.Key != nil {
+ if key, err = msg.Key.Encode(); err != nil {
+ return err
+ }
+ }
+
+ if msg.Value != nil {
+ if val, err = msg.Value.Encode(); err != nil {
+ return err
+ }
+ }
+
+ partitions := ps.msgs[msg.Topic]
+ if partitions == nil {
+ partitions = make(map[int32]*partitionSet)
+ ps.msgs[msg.Topic] = partitions
+ }
+
+ set := partitions[msg.Partition]
+ if set == nil {
+ set = &partitionSet{setToSend: new(MessageSet)}
+ partitions[msg.Partition] = set
+ }
+
+ set.msgs = append(set.msgs, msg)
+ set.setToSend.addMessage(&Message{Codec: CompressionNone, Key: key, Value: val})
+
+ size := producerMessageOverhead + len(key) + len(val)
+ set.bufferBytes += size
+ ps.bufferBytes += size
+ ps.bufferCount++
+
+ return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+ req := &ProduceRequest{
+ RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+ Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+ }
+
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ if ps.parent.conf.Producer.Compression == CompressionNone {
+ req.AddSet(topic, partition, set.setToSend)
+ } else {
+ // When compression is enabled, the entire set for each partition is compressed
+ // and sent as the payload of a single fake "message" with the appropriate codec
+ // set and no key. When the server sees a message with a compression codec, it
+ // decompresses the payload and treats the result as its message set.
+ payload, err := encode(set.setToSend)
+ if err != nil {
+ Logger.Println(err) // if this happens, it's basically our fault.
+ panic(err)
+ }
+ req.AddMessage(topic, partition, &Message{
+ Codec: ps.parent.conf.Producer.Compression,
+ Key: nil,
+ Value: payload,
+ })
+ }
+ }
+ }
+
+ return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ cb(topic, partition, set.msgs)
+ }
+ }
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+ if ps.msgs[topic] == nil {
+ return nil
+ }
+ set := ps.msgs[topic][partition]
+ if set == nil {
+ return nil
+ }
+ ps.bufferBytes -= set.bufferBytes
+ ps.bufferCount -= len(set.msgs)
+ delete(ps.msgs[topic], partition)
+ return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+ switch {
+ // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+ case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
+ return true
+ // Would we overflow the size-limit of a compressed message-batch for this partition?
+ case ps.parent.conf.Producer.Compression != CompressionNone &&
+ ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+ ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
+ return true
+ // Would we overflow simply in number of messages?
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) readyToFlush() bool {
+ switch {
+ // If we don't have any messages, nothing else matters
+ case ps.empty():
+ return false
+ // If all three config values are 0, we always flush as-fast-as-possible
+ case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+ return true
+ // If we've passed the message trigger-point
+ case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+ return true
+ // If we've passed the byte trigger-point
+ case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) empty() bool {
+ return ps.bufferCount == 0
+}
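The three flush triggers that readyToFlush checks map directly onto producer config. A tuning sketch with illustrative values:

    config := sarama.NewConfig()
    config.Producer.Flush.Messages = 500                     // flush after 500 buffered messages...
    config.Producer.Flush.Bytes = 1 << 20                    // ...or roughly 1 MiB of payload...
    config.Producer.Flush.Frequency = 100 * time.Millisecond // ...or 100ms, whichever comes first
    // Leaving all three at zero makes the producer flush as fast as possible.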
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 0000000..e3ea331
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,254 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "math"
+)
+
+type realDecoder struct {
+ raw []byte
+ off int
+ stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+ if rd.remaining() < 1 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int8(rd.raw[rd.off])
+ rd.off++
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+ if rd.remaining() < 2 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+ rd.off += 2
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+ if rd.remaining() < 8 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ if tmp > rd.remaining() {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ } else if tmp > 2*math.MaxUint16 {
+ return -1, PacketDecodingError{"invalid array length"}
+ }
+ return tmp, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+ tmp, err := rd.getInt32()
+
+ if err != nil {
+ return nil, err
+ }
+
+ n := int(tmp)
+
+ switch {
+ case n < -1:
+ return nil, PacketDecodingError{"invalid byteslice length"}
+ case n == -1:
+ return nil, nil
+ case n == 0:
+ return make([]byte, 0), nil
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ tmpStr := rd.raw[rd.off : rd.off+n]
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+ tmp, err := rd.getInt16()
+
+ if err != nil {
+ return "", err
+ }
+
+ n := int(tmp)
+
+ switch {
+ case n < -1:
+ return "", PacketDecodingError{"invalid string length"}
+ case n == -1:
+ return "", nil
+ case n == 0:
+ return "", nil
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return "", ErrInsufficientData
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 4*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, PacketDecodingError{"invalid array length"}
+ }
+
+ ret := make([]int32, n)
+ for i := range ret {
+ ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 8*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, PacketDecodingError{"invalid array length"}
+ }
+
+ ret := make([]int64, n)
+ for i := range ret {
+ ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, PacketDecodingError{"invalid array length"}
+ }
+
+ ret := make([]string, n)
+ for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+		ret[i] = str
+ }
+ return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+ return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+ if length < 0 {
+ return nil, PacketDecodingError{"invalid subset size"}
+ } else if length > rd.remaining() {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ start := rd.off
+ rd.off += length
+ return &realDecoder{raw: rd.raw[start:rd.off]}, nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+ in.saveOffset(rd.off)
+
+ reserve := in.reserveLength()
+ if rd.remaining() < reserve {
+ rd.off = len(rd.raw)
+ return ErrInsufficientData
+ }
+
+ rd.stack = append(rd.stack, in)
+
+ rd.off += reserve
+
+ return nil
+}
+
+func (rd *realDecoder) pop() error {
+	// this is Go's ugly pop pattern (the inverse of append)
+ in := rd.stack[len(rd.stack)-1]
+ rd.stack = rd.stack[:len(rd.stack)-1]
+
+ return in.check(rd.off, rd.raw)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 0000000..076fdd0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,115 @@
+package sarama
+
+import "encoding/binary"
+
+type realEncoder struct {
+ raw []byte
+ off int
+ stack []pushEncoder
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+ re.raw[re.off] = byte(in)
+ re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+ binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+ re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+ binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+ re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+ binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+ re.off += 8
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+ re.putInt32(int32(in))
+ return nil
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+ if in == nil {
+ re.putInt32(-1)
+ return nil
+ }
+ re.putInt32(int32(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putString(in string) error {
+ re.putInt16(int16(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, val := range in {
+ if err := re.putString(val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt64(val)
+ }
+ return nil
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+ in.saveOffset(re.off)
+ re.off += in.reserveLength()
+ re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+	// this is Go's ugly pop pattern (the inverse of append)
+ in := re.stack[len(re.stack)-1]
+ re.stack = re.stack[:len(re.stack)-1]
+
+ return in.run(re.off, re.raw)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/request.go
new file mode 100644
index 0000000..b9f654b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/request.go
@@ -0,0 +1,112 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+type requestBody interface {
+ encoder
+ decoder
+ key() int16
+ version() int16
+}
+
+type request struct {
+ correlationID int32
+ clientID string
+ body requestBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+ pe.push(&lengthField{})
+ pe.putInt16(r.body.key())
+ pe.putInt16(r.body.version())
+ pe.putInt32(r.correlationID)
+ err = pe.putString(r.clientID)
+ if err != nil {
+ return err
+ }
+ err = r.body.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+ var key int16
+ if key, err = pd.getInt16(); err != nil {
+ return err
+ }
+ var version int16
+ if version, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if r.correlationID, err = pd.getInt32(); err != nil {
+ return err
+ }
+	if r.clientID, err = pd.getString(); err != nil {
+		return err
+	}
+
+ r.body = allocateBody(key, version)
+ if r.body == nil {
+ return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+ }
+ return r.body.decode(pd)
+}
+
+func decodeRequest(r io.Reader) (req *request, err error) {
+ lengthBytes := make([]byte, 4)
+ if _, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, err
+ }
+
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+ if length <= 4 || length > MaxRequestSize {
+ return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if _, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, err
+ }
+
+ req = &request{}
+ if err := decode(encodedReq, req); err != nil {
+ return nil, err
+ }
+ return req, nil
+}
+
+func allocateBody(key, version int16) requestBody {
+ switch key {
+ case 0:
+ return &ProduceRequest{}
+ case 1:
+ return &FetchRequest{}
+ case 2:
+ return &OffsetRequest{}
+ case 3:
+ return &MetadataRequest{}
+ case 8:
+ return &OffsetCommitRequest{Version: version}
+ case 9:
+ return &OffsetFetchRequest{}
+ case 10:
+ return &ConsumerMetadataRequest{}
+ case 11:
+ return &JoinGroupRequest{}
+ case 12:
+ return &HeartbeatRequest{}
+ case 13:
+ return &LeaveGroupRequest{}
+ case 14:
+ return &SyncGroupRequest{}
+ case 15:
+ return &DescribeGroupsRequest{}
+ case 16:
+ return &ListGroupsRequest{}
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 0000000..f3f4d27
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+ length int32
+ correlationID int32
+}
+
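+// decode reads the header that prefixes every Kafka response: a 4-byte length,
+// then the 4-byte correlation ID that ties the response back to its request.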
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+ r.length, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if r.length <= 4 || r.length > MaxResponseSize {
+ return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+ }
+
+ r.correlationID, err = pd.getInt32()
+ return err
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 0000000..d598217
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,47 @@
+/*
+Package sarama provides client libraries for the Kafka 0.8 protocol. The AsyncProducer object is the high-level
+API for producing messages asynchronously; the SyncProducer provides a blocking API for the same purpose.
+The Consumer object is the high-level API for consuming messages. The Client object provides metadata
+management functionality that is shared between the higher-level objects.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire.
+
+The Request/Response objects and properties are mostly undocumented, as they line up exactly with the
+protocol fields documented by Kafka at https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
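+
+As a minimal sketch (the broker address and topic here are placeholders, and the nil config selects the defaults):
+
+ producer, err := NewSyncProducer([]string{"localhost:9092"}, nil)
+ if err != nil {
+ panic(err)
+ }
+ defer producer.Close()
+
+ partition, offset, err := producer.SendMessage(&ProducerMessage{
+ Topic: "example-topic",
+ Value: StringEncoder("hello"),
+ })
+ // partition and offset report where the message was written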
+*/
+package sarama
+
+import (
+ "io/ioutil"
+ "log"
+)
+
+// Logger is the instance of a StdLogger interface that Sarama writes connection
+// management events to. By default it is set to discard all log messages via ioutil.Discard,
+// but you can set it to redirect wherever you want.
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go
new file mode 100644
index 0000000..e86cb70
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go
@@ -0,0 +1,41 @@
+package sarama
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/golang/snappy"
+)
+
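+// snappyMagic is the 8-byte header ("\x82SNAPPY\x00") of the xerial/snappy-java framing format.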
+var snappyMagic = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+// snappyEncode compresses the given bytes with snappy
+func snappyEncode(src []byte) []byte {
+ return snappy.Encode(nil, src)
+}
+
+// snappyDecode decodes snappy-compressed data, handling both raw snappy and
+// the xerial/snappy-java framed format (detected via snappyMagic)
+func snappyDecode(src []byte) ([]byte, error) {
+ if len(src) > 8 && bytes.Equal(src[:8], snappyMagic) {
+ var (
+ pos = uint32(16) // skip the 8-byte magic and the two 4-byte version fields
+ max = uint32(len(src))
+ dst = make([]byte, 0, len(src))
+ chunk []byte
+ err error
+ )
+ for pos < max {
+ size := binary.BigEndian.Uint32(src[pos : pos+4])
+ pos += 4
+
+ chunk, err = snappy.Decode(chunk, src[pos:pos+size])
+ if err != nil {
+ return nil, err
+ }
+ pos += size
+ dst = append(dst, chunk...)
+ }
+ return dst, nil
+ }
+ return snappy.Decode(nil, src)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_group_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 0000000..60be6f3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,86 @@
+package sarama
+
+type SyncGroupRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+ GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+ return err
+ }
+ for memberId, memberAssignment := range r.GroupAssignments {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := pe.putBytes(memberAssignment); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupAssignments = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ memberAssignment, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+ if r.GroupAssignments == nil {
+ r.GroupAssignments = make(map[string][]byte)
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_group_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 0000000..e10685e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,22 @@
+package sarama
+
+type SyncGroupResponse struct {
+ Err KError
+ MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ r.MemberAssignment, err = pd.getBytes()
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 0000000..69a26d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,95 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages. It routes messages to the correct broker, refreshing metadata as appropriate,
+// and parses responses for errors. You must call Close() on a producer to avoid leaks: it may not be garbage-collected automatically when
+// it passes out of scope.
+type SyncProducer interface {
+
+ // SendMessage produces a given message, and returns only when it either has
+ // succeeded or failed to produce. It will return the partition and the offset
+ // of the produced message, or an error if the message failed to produce.
+ SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+ // Close shuts down the producer and flushes any messages it may have buffered.
+ // You must call this function before a producer object passes out of scope, as
+ // it may otherwise leak memory. You must call this before calling Close on the
+ // underlying client.
+ Close() error
+}
+
+type syncProducer struct {
+ producer *asyncProducer
+ wg sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+ p, err := NewAsyncProducer(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+ p.conf.Producer.Return.Successes = true
+ p.conf.Producer.Return.Errors = true
+ sp := &syncProducer{producer: p}
+
+ sp.wg.Add(2)
+ go withRecover(sp.handleSuccesses)
+ go withRecover(sp.handleErrors)
+
+ return sp
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+ oldMetadata := msg.Metadata
+ defer func() {
+ msg.Metadata = oldMetadata
+ }()
+
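+ // the async producer echoes every message back on Successes() or Errors();
+ // stashing a one-shot channel in Metadata lets SendMessage block until the
+ // echo for this particular message arrives (see handleSuccesses/handleErrors)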
+ expectation := make(chan error, 1)
+ msg.Metadata = expectation
+ sp.producer.Input() <- msg
+
+ if err := <-expectation; err != nil {
+ return -1, -1, err
+ }
+
+ return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+ defer sp.wg.Done()
+ for msg := range sp.producer.Successes() {
+ expectation := msg.Metadata.(chan error)
+ expectation <- nil
+ }
+}
+
+func (sp *syncProducer) handleErrors() {
+ defer sp.wg.Done()
+ for err := range sp.producer.Errors() {
+ expectation := err.Msg.Metadata.(chan error)
+ expectation <- err.Err
+ }
+}
+
+func (sp *syncProducer) Close() error {
+ sp.producer.AsyncClose()
+ sp.wg.Wait()
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md
new file mode 100644
index 0000000..3464c4a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md
@@ -0,0 +1,10 @@
+# Sarama tools
+
+This folder contains applications that are useful for exploring or instrumenting your Kafka cluster.
+Some of these tools mirror tools that ship with Kafka, but they don't require a JVM to run.
+
+- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
+- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
+- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
+
+To install all tools, run `go get github.com/Shopify/sarama/tools/...`
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
new file mode 100644
index 0000000..67da9df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
@@ -0,0 +1,2 @@
+kafka-console-consumer
+kafka-console-consumer.test
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
new file mode 100644
index 0000000..4e77f0b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
@@ -0,0 +1,29 @@
+# kafka-console-consumer
+
+A simple command line tool to consume partitions of a topic and print the
+messages on the standard output.
+
+### Installation
+
+ go get github.com/Shopify/sarama/tools/kafka-console-consumer
+
+### Usage
+
+ # Minimum invocation
+ kafka-console-consumer -topic=test -brokers=kafka1:9092
+
+ # It will pick up a KAFKA_PEERS environment variable
+ export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
+ kafka-console-consumer -topic=test
+
+ # You can specify the offset you want to start at. It can be either
+ # `oldest` or `newest`. The default is `newest`.
+ kafka-console-consumer -topic=test -offset=oldest
+ kafka-console-consumer -topic=test -offset=newest
+
+ # You can specify the partition(s) you want to consume as a comma-separated
+ # list. The default is `all`.
+ kafka-console-consumer -topic=test -partitions=1,2,3
+
+ # Display all command line options
+ kafka-console-consumer -help
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go
new file mode 100644
index 0000000..0f1eb89
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go
@@ -0,0 +1,145 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/Shopify/sarama"
+)
+
+var (
+ brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
+ topic = flag.String("topic", "", "REQUIRED: the topic to consume")
+ partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers")
+ offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`")
+ verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
+ bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.")
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+func main() {
+ flag.Parse()
+
+ if *brokerList == "" {
+ printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
+ }
+
+ if *topic == "" {
+ printUsageErrorAndExit("-topic is required")
+ }
+
+ if *verbose {
+ sarama.Logger = logger
+ }
+
+ var initialOffset int64
+ switch *offset {
+ case "oldest":
+ initialOffset = sarama.OffsetOldest
+ case "newest":
+ initialOffset = sarama.OffsetNewest
+ default:
+ printUsageErrorAndExit("-offset should be `oldest` or `newest`")
+ }
+
+ c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start consumer: %s", err)
+ }
+
+ partitionList, err := getPartitions(c)
+ if err != nil {
+ printErrorAndExit(69, "Failed to get the list of partitions: %s", err)
+ }
+
+ var (
+ messages = make(chan *sarama.ConsumerMessage, *bufferSize)
+ closing = make(chan struct{})
+ wg sync.WaitGroup
+ )
+
+ go func() {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Kill, os.Interrupt)
+ <-signals
+ logger.Println("Initiating shutdown of consumer...")
+ close(closing)
+ }()
+
+ for _, partition := range partitionList {
+ pc, err := c.ConsumePartition(*topic, partition, initialOffset)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err)
+ }
+
+ go func(pc sarama.PartitionConsumer) {
+ <-closing
+ pc.AsyncClose()
+ }(pc)
+
+ wg.Add(1)
+ go func(pc sarama.PartitionConsumer) {
+ defer wg.Done()
+ for message := range pc.Messages() {
+ messages <- message
+ }
+ }(pc)
+ }
+
+ go func() {
+ for msg := range messages {
+ fmt.Printf("Partition:\t%d\n", msg.Partition)
+ fmt.Printf("Offset:\t%d\n", msg.Offset)
+ fmt.Printf("Key:\t%s\n", string(msg.Key))
+ fmt.Printf("Value:\t%s\n", string(msg.Value))
+ fmt.Println()
+ }
+ }()
+
+ wg.Wait()
+ logger.Println("Done consuming topic", *topic)
+ close(messages)
+
+ if err := c.Close(); err != nil {
+ logger.Println("Failed to close consumer: ", err)
+ }
+}
+
+func getPartitions(c sarama.Consumer) ([]int32, error) {
+ if *partitions == "all" {
+ return c.Partitions(*topic)
+ }
+
+ tmp := strings.Split(*partitions, ",")
+ var pList []int32
+ for i := range tmp {
+ val, err := strconv.ParseInt(tmp[i], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ pList = append(pList, int32(val))
+ }
+
+ return pList, nil
+}
+
+func printErrorAndExit(code int, format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ os.Exit(code)
+}
+
+func printUsageErrorAndExit(format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ fmt.Fprintln(os.Stderr, "Available command line options:")
+ flag.PrintDefaults()
+ os.Exit(64)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore
new file mode 100644
index 0000000..5837fe8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore
@@ -0,0 +1,2 @@
+kafka-console-partitionconsumer
+kafka-console-partitionconsumer.test
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md
new file mode 100644
index 0000000..646dd5f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md
@@ -0,0 +1,28 @@
+# kafka-console-partitionconsumer
+
+NOTE: this tool is deprecated in favour of the more general and more powerful
+`kafka-console-consumer`.
+
+A simple command line tool to consume a partition of a topic and print the messages
+on the standard output.
+
+### Installation
+
+ go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer
+
+### Usage
+
+ # Minimum invocation
+ kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092
+
+ # It will pick up a KAFKA_PEERS environment variable
+ export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
+ kafka-console-partitionconsumer -topic=test -partition=4
+
+ # You can specify the offset you want to start at. It can be either
+ # `oldest`, `newest`, or a specific offset number
+ kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest
+ kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337
+
+ # Display all command line options
+ kafka-console-partitionconsumer -help
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
new file mode 100644
index 0000000..d5e4464
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
@@ -0,0 +1,102 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+
+ "github.com/Shopify/sarama"
+)
+
+var (
+ brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
+ topic = flag.String("topic", "", "REQUIRED: the topic to consume")
+ partition = flag.Int("partition", -1, "REQUIRED: the partition to consume")
+ offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`, or an actual offset")
+ verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+func main() {
+ flag.Parse()
+
+ if *brokerList == "" {
+ printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
+ }
+
+ if *topic == "" {
+ printUsageErrorAndExit("-topic is required")
+ }
+
+ if *partition == -1 {
+ printUsageErrorAndExit("-partition is required")
+ }
+
+ if *verbose {
+ sarama.Logger = logger
+ }
+
+ var (
+ initialOffset int64
+ offsetError error
+ )
+ switch *offset {
+ case "oldest":
+ initialOffset = sarama.OffsetOldest
+ case "newest":
+ initialOffset = sarama.OffsetNewest
+ default:
+ initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64)
+ }
+
+ if offsetError != nil {
+ printUsageErrorAndExit("Invalid initial offset: %s", *offset)
+ }
+
+ c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start consumer: %s", err)
+ }
+
+ pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset)
+ if err != nil {
+ printErrorAndExit(69, "Failed to start partition consumer: %s", err)
+ }
+
+ go func() {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, os.Kill, os.Interrupt)
+ <-signals
+ pc.AsyncClose()
+ }()
+
+ for msg := range pc.Messages() {
+ fmt.Printf("Offset:\t%d\n", msg.Offset)
+ fmt.Printf("Key:\t%s\n", string(msg.Key))
+ fmt.Printf("Value:\t%s\n", string(msg.Value))
+ fmt.Println()
+ }
+
+ if err := c.Close(); err != nil {
+ logger.Println("Failed to close consumer: ", err)
+ }
+}
+
+func printErrorAndExit(code int, format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ os.Exit(code)
+}
+
+func printUsageErrorAndExit(format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ fmt.Fprintln(os.Stderr, "Available command line options:")
+ flag.PrintDefaults()
+ os.Exit(64)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore
new file mode 100644
index 0000000..2b9e563
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore
@@ -0,0 +1,2 @@
+kafka-console-producer
+kafka-console-producer.test
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md
new file mode 100644
index 0000000..6b3a65f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md
@@ -0,0 +1,34 @@
+# kafka-console-producer
+
+A simple command line tool to produce a single message to Kafka.
+
+### Installation
+
+ go get github.com/Shopify/sarama/tools/kafka-console-producer
+
+
+### Usage
+
+ # Minimum invocation
+ kafka-console-producer -topic=test -value=value -brokers=kafka1:9092
+
+ # It will pick up a KAFKA_PEERS environment variable
+ export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
+ kafka-console-producer -topic=test -value=value
+
+ # It will read the value from stdin by using pipes
+ echo "hello world" | kafka-console-producer -topic=test
+
+ # Specify a key:
+ echo "hello world" | kafka-console-producer -topic=test -key=key
+
+ # Partitioning: by default, kafka-console-producer will partition as follows:
+ # - manual partitioning if a -partition is provided
+ # - hash partitioning by key if a -key is provided
+ # - random partitioning otherwise.
+ #
+ # You can override this using the -partitioner argument:
+ echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random
+
+ # Display all command line options
+ kafka-console-producer -help
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go
new file mode 100644
index 0000000..6a1765d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go
@@ -0,0 +1,118 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+
+ "github.com/Shopify/sarama"
+)
+
+var (
+ brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable")
+ topic = flag.String("topic", "", "REQUIRED: the topic to produce to")
+ key = flag.String("key", "", "The key of the message to produce. Can be empty.")
+ value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.")
+ partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`")
+ partition = flag.Int("partition", -1, "The partition to produce to.")
+ verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr")
+ silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout")
+
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+)
+
+func main() {
+ flag.Parse()
+
+ if *brokerList == "" {
+ printUsageErrorAndExit("no -brokers specified. Alternatively, set the KAFKA_PEERS environment variable")
+ }
+
+ if *topic == "" {
+ printUsageErrorAndExit("no -topic specified")
+ }
+
+ if *verbose {
+ sarama.Logger = logger
+ }
+
+ config := sarama.NewConfig()
+ config.Producer.RequiredAcks = sarama.WaitForAll
+
+ switch *partitioner {
+ case "":
+ if *partition >= 0 {
+ config.Producer.Partitioner = sarama.NewManualPartitioner
+ } else {
+ config.Producer.Partitioner = sarama.NewHashPartitioner
+ }
+ case "hash":
+ config.Producer.Partitioner = sarama.NewHashPartitioner
+ case "random":
+ config.Producer.Partitioner = sarama.NewRandomPartitioner
+ case "manual":
+ config.Producer.Partitioner = sarama.NewManualPartitioner
+ if *partition == -1 {
+ printUsageErrorAndExit("-partition is required when partitioning manually")
+ }
+ default:
+ printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner))
+ }
+
+ message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}
+
+ if *key != "" {
+ message.Key = sarama.StringEncoder(*key)
+ }
+
+ if *value != "" {
+ message.Value = sarama.StringEncoder(*value)
+ } else if stdinAvailable() {
+ bytes, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ printErrorAndExit(66, "Failed to read data from the standard input: %s", err)
+ }
+ message.Value = sarama.ByteEncoder(bytes)
+ } else {
+ printUsageErrorAndExit("-value is required, or you have to provide the value on stdin")
+ }
+
+ producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
+ if err != nil {
+ printErrorAndExit(69, "Failed to open Kafka producer: %s", err)
+ }
+ defer func() {
+ if err := producer.Close(); err != nil {
+ logger.Println("Failed to close Kafka producer cleanly:", err)
+ }
+ }()
+
+ partition, offset, err := producer.SendMessage(message)
+ if err != nil {
+ printErrorAndExit(69, "Failed to produce message: %s", err)
+ } else if !*silent {
+ fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
+ }
+}
+
+func printErrorAndExit(code int, format string, values ...interface{}) {
+ fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
+ fmt.Fprintln(os.Stderr)
+ os.Exit(code)
+}
+
+func printUsageErrorAndExit(message string) {
+ fmt.Fprintln(os.Stderr, "ERROR:", message)
+ fmt.Fprintln(os.Stderr)
+ fmt.Fprintln(os.Stderr, "Available command line options:")
+ flag.PrintDefaults()
+ os.Exit(64)
+}
+
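+// stdinAvailable reports whether stdin is a pipe or redirection rather than an
+// interactive terminal, by checking the character-device mode bit.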
+func stdinAvailable() bool {
+ stat, _ := os.Stdin.Stat()
+ return (stat.Mode() & os.ModeCharDevice) == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go b/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go
new file mode 100644
index 0000000..fef7c73
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,89 @@
+package sarama
+
+import "sort"
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+ return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+ return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupeAndSort(input []int32) []int32 {
+ ret := make([]int32, 0, len(input))
+ for _, val := range input {
+ ret = append(ret, val)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func withRecover(fn func()) {
+ defer func() {
+ handler := PanicHandler
+ if handler != nil {
+ if err := recover(); err != nil {
+ handler(err)
+ }
+ }
+ }()
+
+ fn()
+}
+
+func safeAsyncClose(b *Broker) {
+ tmp := b // local var prevents clobbering in goroutine
+ go withRecover(func() {
+ if connected, _ := tmp.Connected(); connected {
+ if err := tmp.Close(); err != nil {
+ Logger.Println("Error closing broker", tmp.ID(), ":", err)
+ }
+ }
+ })
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+ Encode() ([]byte, error)
+ Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+ return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+ return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+ return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+ return len(b)
+}
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh
new file mode 100644
index 0000000..95e47dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+set -ex
+
+# Launch and wait for toxiproxy
+${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh &
+while ! nc -q 1 localhost 2181 </dev/null; do
+ sleep 1
+done
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf
new file mode 100644
index 0000000..d975de4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf
@@ -0,0 +1,5 @@
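+# KAFKAID and ZK_PORT are placeholders; setup_services.sh substitutes the per-instance ports with sed.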
+start on started zookeeper-ZK_PORT
+stop on stopping zookeeper-ZK_PORT
+
+pre-start exec sleep 2
+exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh
new file mode 100644
index 0000000..b1e429f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -ex
+
+apt-get update
+yes | apt-get install default-jre
+
+export KAFKA_INSTALL_ROOT=/opt
+export KAFKA_HOSTNAME=192.168.100.67
+export KAFKA_VERSION=0.9.0.0
+export REPOSITORY_ROOT=/vagrant
+
+sh /vagrant/vagrant/install_cluster.sh
+sh /vagrant/vagrant/setup_services.sh
+sh /vagrant/vagrant/create_topics.sh
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh
new file mode 100644
index 0000000..e52c00e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+set -ex
+
+${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 &
+PID=$!
+
+while ! nc -q 1 localhost 8474 </dev/null; do
+ sleep 1
+done
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/server.properties b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/server.properties
new file mode 100644
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/server.properties
+# The number of threads handling network requests
+num.network.threads=2
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=1048576
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=1048576
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=KAFKA_DATADIR
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=2
+
+# Create new topics with a replication factor of 2 so failover can be tested
+# more easily.
+default.replication.factor=2
+
+auto.create.topics.enable=false
+delete.topic.enable=true
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.bytes.
+log.retention.bytes=268435456
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.segment.bytes=268435456
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=60000
+
+# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
+# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
+log.cleaner.enable=false
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:ZK_PORT
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.session.timeout.ms=3000
+zookeeper.connection.timeout.ms=3000
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh
new file mode 100644
index 0000000..81d8ea0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+set -ex
+
+stop toxiproxy || true
+cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf
+cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/
+start toxiproxy
+
+for i in 1 2 3 4 5; do
+ ZK_PORT=`expr $i + 2180`
+ KAFKA_PORT=`expr $i + 9090`
+
+ stop zookeeper-${ZK_PORT} || true
+
+ # set up zk service
+ cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf
+ sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf
+
+ # set up kafka service
+ cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf
+ sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
+ sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
+
+ start zookeeper-${ZK_PORT}
+done
+
+# Wait for the last kafka node to finish booting
+while ! nc -q 1 localhost 29095 </dev/null; do
+ sleep 1
+done
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
new file mode 100644
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, you can send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to
+StatsD, or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events (see the example after this list):
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
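+For example, a minimal sketch (the timestamp shown is illustrative):
+
+```go
+log.WithFields(log.Fields{"animal": "walrus"}).Info("A walrus appears")
+```
+
+With the default text formatter the automatic fields are filled in, yielding a line such as
+`time="2015-03-26T01:27:38-04:00" level=info msg="A walrus appears" animal=walrus`.
+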
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`
+* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
+
+ ```go
+ logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
+ ```
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..17fe6f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,252 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
new file mode 100644
index 0000000..a1623ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Level = logrus.DebugLevel
+}
+
+func main() {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err": err,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "number": 8,
+ }).Debug("Started observing beach")
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "temperature": -4,
+ }).Debug("Temperature changes")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
new file mode 100644
index 0000000..cb5759a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development"))
+}
+
+func main() {
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..a67e1b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,188 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+	// std is the package-level standard logger, named after the one in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..104d689
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,48 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc.
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to avoid silently overwriting the `time`, `msg` and `level` fields
+// when dumping an entry. Without this code,
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would silently drop the user-provided level. With it, the entry is logged
+// as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
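+
+// A minimal sketch of a custom Formatter (illustrative only; PlainFormatter
+// is a hypothetical name and an fmt import is assumed): any type with a
+// Format(*Entry) ([]byte, error) method satisfies the interface above.
+//
+//	type PlainFormatter struct{}
+//
+//	func (f *PlainFormatter) Format(entry *Entry) ([]byte, error) {
+//		// Render just "LEVEL: message\n", ignoring fields and timestamps.
+//		return []byte(fmt.Sprintf("%s: %s\n", entry.Level.String(), entry.Message)), nil
+//	}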
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
new file mode 100644
index 0000000..8ea93dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
@@ -0,0 +1,56 @@
+package logstash
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// LogstashFormatter generates JSON in Logstash format.
+// Logstash site: http://logstash.net/
+type LogstashFormatter struct {
+ Type string // if not empty use for logstash type field.
+
+ // TimestampFormat sets the format used for timestamps.
+ TimestampFormat string
+}
+
+func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+ entry.Data["@version"] = 1
+
+ if f.TimestampFormat == "" {
+ f.TimestampFormat = logrus.DefaultTimestampFormat
+ }
+
+ entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
+
+ // set message field
+ v, ok := entry.Data["message"]
+ if ok {
+ entry.Data["fields.message"] = v
+ }
+ entry.Data["message"] = entry.Message
+
+ // set level field
+ v, ok = entry.Data["level"]
+ if ok {
+ entry.Data["fields.level"] = v
+ }
+ entry.Data["level"] = entry.Level.String()
+
+ // set type field
+ if f.Type != "" {
+ v, ok = entry.Data["type"]
+ if ok {
+ entry.Data["fields.type"] = v
+ }
+ entry.Data["type"] = f.Type
+ }
+
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..0da2b36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A Hook is fired when logging on the levels returned from `Levels()` by
+// your implementation of the interface. Note that hooks are not fired in a
+// goroutine or via a worker channel; if you don't want logging calls at the
+// levels returned from `Levels()` to block, you must handle that yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type levelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks levelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
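+
+// A minimal sketch of a Hook implementation (illustrative only; stderrHook is
+// a hypothetical name, and an `os` import is assumed): any type providing
+// Levels() and Fire(*Entry) can be registered via log.Hooks.Add.
+//
+//	type stderrHook struct{}
+//
+//	func (stderrHook) Levels() []Level {
+//		return []Level{ErrorLevel, FatalLevel, PanicLevel}
+//	}
+//
+//	func (stderrHook) Fire(entry *Entry) error {
+//		// entry.String() runs the entry through the logger's formatter.
+//		line, err := entry.String()
+//		if err != nil {
+//			return err
+//		}
+//		_, err = os.Stderr.WriteString(line)
+//		return err
+//	}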
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
new file mode 100644
index 0000000..b0502c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
@@ -0,0 +1,54 @@
+package airbrake
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/tobi/airbrake-go"
+)
+
+// airbrakeHook sends exceptions to an exception-tracking service compatible
+// with the Airbrake API.
+type airbrakeHook struct {
+ APIKey string
+ Endpoint string
+ Environment string
+}
+
+func NewHook(endpoint, apiKey, env string) *airbrakeHook {
+ return &airbrakeHook{
+ APIKey: apiKey,
+ Endpoint: endpoint,
+ Environment: env,
+ }
+}
+
+func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
+ airbrake.ApiKey = hook.APIKey
+ airbrake.Endpoint = hook.Endpoint
+ airbrake.Environment = hook.Environment
+
+ var notifyErr error
+ err, ok := entry.Data["error"].(error)
+ if ok {
+ notifyErr = err
+ } else {
+ notifyErr = errors.New(entry.Message)
+ }
+
+ airErr := airbrake.Notify(notifyErr)
+ if airErr != nil {
+ return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
+ }
+
+ return nil
+}
+
+func (hook *airbrakeHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
new file mode 100644
index 0000000..d20a0f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
@@ -0,0 +1,68 @@
+package logrus_bugsnag
+
+import (
+ "errors"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/bugsnag/bugsnag-go"
+)
+
+type bugsnagHook struct{}
+
+// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
+// bugsnag.Configure. Bugsnag must be configured before the hook.
+var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")
+
+// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
+// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
+// failed.
+type ErrBugsnagSendFailed struct {
+ err error
+}
+
+func (e ErrBugsnagSendFailed) Error() string {
+ return "failed to send error to Bugsnag: " + e.err.Error()
+}
+
+// NewBugsnagHook initializes a logrus hook which sends exceptions to an
+// exception-tracking service compatible with the Bugsnag API. Before using
+// this hook, you must call bugsnag.Configure(). The returned object should be
+// registered with a log via `AddHook()`.
+//
+// Entries that trigger an Error, Fatal or Panic should now include an "error"
+// field to send to Bugsnag.
+func NewBugsnagHook() (*bugsnagHook, error) {
+ if bugsnag.Config.APIKey == "" {
+ return nil, ErrBugsnagUnconfigured
+ }
+ return &bugsnagHook{}, nil
+}
+
+// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
+// "error" field (or the Message if the error isn't present) and sends it off.
+func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
+ var notifyErr error
+ err, ok := entry.Data["error"].(error)
+ if ok {
+ notifyErr = err
+ } else {
+ notifyErr = errors.New(entry.Message)
+ }
+
+ bugsnagErr := bugsnag.Notify(notifyErr)
+ if bugsnagErr != nil {
+ return ErrBugsnagSendFailed{bugsnagErr}
+ }
+
+ return nil
+}
+
+// Levels enumerates the log levels on which the error should be forwarded to
+// bugsnag: everything at or above the "Error" level.
+func (hook *bugsnagHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
new file mode 100644
index 0000000..ae61e92
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
@@ -0,0 +1,28 @@
+# Papertrail Hook for Logrus
+
+[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
+
+In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
+
+## Usage
+
+You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
+
+For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
+
+```go
+import (
+  "github.com/Sirupsen/logrus"
+  "github.com/Sirupsen/logrus/hooks/papertrail"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
new file mode 100644
index 0000000..c0f10c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
@@ -0,0 +1,55 @@
+package logrus_papertrail
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+const (
+ format = "Jan 2 15:04:05"
+)
+
+// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
+type PapertrailHook struct {
+ Host string
+ Port int
+ AppName string
+ UDPConn net.Conn
+}
+
+// NewPapertrailHook creates a hook to be added to an instance of logger.
+func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
+ conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
+ return &PapertrailHook{host, port, appName, conn}, err
+}
+
+// Fire is called when a log event is fired.
+func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
+ date := time.Now().Format(format)
+ msg, _ := entry.String()
+ payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg)
+
+ bytesWritten, err := hook.UDPConn.Write([]byte(payload))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
+ return err
+ }
+
+ return nil
+}
+
+// Levels returns the available logging levels.
+func (hook *PapertrailHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
new file mode 100644
index 0000000..19e58bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
@@ -0,0 +1,61 @@
+# Sentry Hook for Logrus
+
+[Sentry](https://getsentry.com) provides both self-hosted and hosted
+solutions for exception tracking.
+Both client and server are
+[open source](https://github.com/getsentry/sentry).
+
+## Usage
+
+Every sentry application defined on the server gets a different
+[DSN](https://www.getsentry.com/docs/). In the example below replace
+`YOUR_DSN` with the one created for your application.
+
+```go
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/sentry"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ })
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+## Special fields
+
+Some logrus fields have a special meaning in this hook:
+server_name and logger.
+When logs are sent to sentry, these fields are treated differently.
+- server_name (also known as hostname) is the name of the server that
+is logging the event (e.g. hostname.example.com)
+- logger is the part of the application that is logging the event.
+In Go this usually means setting it to the name of the package.
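+
+For example, a sketch of setting these fields on an entry (the host and
+package names are placeholders):
+
+```go
+log.WithFields(logrus.Fields{
+    "server_name": "hostname.example.com",
+    "logger":      "mypackage",
+}).Error("something failed")
+```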
+
+## Timeout
+
+`Timeout` is the time the sentry hook will wait for a response
+from the sentry server.
+
+If this time elapses with no response from
+the server, an error will be returned.
+
+If `Timeout` is set to 0, the SentryHook will not wait for a reply
+and will assume correct delivery.
+
+The SentryHook has a default timeout of `100 milliseconds` when created
+with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
+
+```go
+hook, _ := logrus_sentry.NewSentryHook(...)
+hook.Timeout = 20 * time.Second
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
new file mode 100644
index 0000000..379f281
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
@@ -0,0 +1,100 @@
+package logrus_sentry
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/getsentry/raven-go"
+)
+
+var (
+ severityMap = map[logrus.Level]raven.Severity{
+ logrus.DebugLevel: raven.DEBUG,
+ logrus.InfoLevel: raven.INFO,
+ logrus.WarnLevel: raven.WARNING,
+ logrus.ErrorLevel: raven.ERROR,
+ logrus.FatalLevel: raven.FATAL,
+ logrus.PanicLevel: raven.FATAL,
+ }
+)
+
+func getAndDel(d logrus.Fields, key string) (string, bool) {
+ var (
+ ok bool
+ v interface{}
+ val string
+ )
+ if v, ok = d[key]; !ok {
+ return "", false
+ }
+
+ if val, ok = v.(string); !ok {
+ return "", false
+ }
+ delete(d, key)
+ return val, true
+}
+
+// SentryHook delivers logs to a sentry server.
+type SentryHook struct {
+	// Timeout sets the time to wait for a delivery error from the sentry server.
+	// If this is set to zero the hook will not wait for any response and will
+	// consider the message correctly sent.
+ Timeout time.Duration
+
+ client *raven.Client
+ levels []logrus.Level
+}
+
+// NewSentryHook creates a hook to be added to an instance of logger
+// and initializes the raven client.
+// This method sets the timeout to 100 milliseconds.
+func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
+ client, err := raven.NewClient(DSN, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &SentryHook{100 * time.Millisecond, client, levels}, nil
+}
+
+// Fire is called when an event should be sent to sentry.
+// Special fields that sentry uses to give more information to the server
+// are extracted from entry.Data (if they are found).
+// These fields are: logger and server_name.
+func (hook *SentryHook) Fire(entry *logrus.Entry) error {
+ packet := &raven.Packet{
+ Message: entry.Message,
+ Timestamp: raven.Timestamp(entry.Time),
+ Level: severityMap[entry.Level],
+ Platform: "go",
+ }
+
+ d := entry.Data
+
+ if logger, ok := getAndDel(d, "logger"); ok {
+ packet.Logger = logger
+ }
+ if serverName, ok := getAndDel(d, "server_name"); ok {
+ packet.ServerName = serverName
+ }
+ packet.Extra = map[string]interface{}(d)
+
+ _, errCh := hook.client.Capture(packet, nil)
+ timeout := hook.Timeout
+ if timeout != 0 {
+ timeoutCh := time.After(timeout)
+ select {
+ case err := <-errCh:
+ return err
+ case <-timeoutCh:
+ return fmt.Errorf("no response from sentry server in %s", timeout)
+ }
+ }
+ return nil
+}
+
+// Levels returns the available logging levels.
+func (hook *SentryHook) Levels() []logrus.Level {
+ return hook.levels
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
new file mode 100644
index 0000000..4dbb8e7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
@@ -0,0 +1,20 @@
+# Syslog Hooks for Logrus
+
+## Usage
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 0000000..b6fa374
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,59 @@
+package logrus_syslog
+
+import (
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "os"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+ Writer *syslog.Writer
+ SyslogNetwork string
+ SyslogRaddr string
+}
+
+// Creates a hook to be added to an instance of logger. This is called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+ w, err := syslog.Dial(network, raddr, priority, tag)
+ return &SyslogHook{w, network, raddr}, err
+}
+
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+ line, err := entry.String()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+ return err
+ }
+
+ switch entry.Level {
+ case logrus.PanicLevel:
+ return hook.Writer.Crit(line)
+ case logrus.FatalLevel:
+ return hook.Writer.Crit(line)
+ case logrus.ErrorLevel:
+ return hook.Writer.Err(line)
+ case logrus.WarnLevel:
+ return hook.Writer.Warning(line)
+ case logrus.InfoLevel:
+ return hook.Writer.Info(line)
+ case logrus.DebugLevel:
+ return hook.Writer.Debug(line)
+ default:
+ return nil
+ }
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..dcc4f1d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,40 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ if f.TimestampFormat == "" {
+ f.TimestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(f.TimestampFormat)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..da928a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,203 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this writer while a mutex is held. It's common
+	// to set this to a file, or leave it at the default, `os.Stdout`. You can
+	// also set this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks levelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter`, of which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but when writing to a file it won't. You can easily
+	// implement your own formatter satisfying the `Formatter` interface; see the
+	// `README` or the included formatters for examples.
+ Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in development.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(levelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stdout,
+ Formatter: new(TextFormatter),
+ Hooks: make(levelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..43ee12e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,94 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
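+
+// A sketch of typical ParseLevel usage, e.g. wiring a user-supplied level
+// string to a logger:
+//
+//	if lvl, err := ParseLevel("debug"); err == nil {
+//		logger.Level = lvl
+//	}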
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var _ StdLogger = &log.Logger{}
+
+// StdLogger is what your logrus-enabled library should take; that way it can
+// accept either a stdlib logger or a logrus logger. There's no standard
+// interface, so this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
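+
+// A sketch of the intended use (Connect is a hypothetical library function):
+// by accepting StdLogger, it works with both a *log.Logger from the stdlib
+// and a *Logger from this package.
+//
+//	func Connect(addr string, log StdLogger) {
+//		log.Printf("dialing %s", addr)
+//		// ...
+//	}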
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
new file mode 100644
index 0000000..8fe02a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
new file mode 100644
index 0000000..0428ee5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
@@ -0,0 +1,20 @@
+/*
+ Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
+*/
+package logrus
+
+import (
+ "syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..a2c0b40
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..b8bebc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go
new file mode 100644
index 0000000..af609a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go
@@ -0,0 +1,7 @@
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..2e09f6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..612417f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,149 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+
+ if f.TimestampFormat == "" {
+ f.TimestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ }
+}
+
+// needsQuoting reports whether text contains any character outside the
+// unquoted-safe set [-.0-9A-Za-z] and therefore must be quoted in the output.
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
+	switch value := value.(type) {
+	case string:
+		if needsQuoting(value) {
+			fmt.Fprintf(b, "%v=%q ", key, value)
+		} else {
+			fmt.Fprintf(b, "%v=%s ", key, value)
+		}
+	case error:
+		if needsQuoting(value.Error()) {
+			fmt.Fprintf(b, "%v=%q ", key, value)
+		} else {
+			fmt.Fprintf(b, "%v=%s ", key, value)
+		}
+	default:
+		fmt.Fprintf(b, "%v=%v ", key, value)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..1e30b1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,31 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ go logger.writerScanner(reader)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ logger.Print(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
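+
+// A sketch of typical Writer() usage (assumes the stdlib `log` package is
+// imported as stdlog): third-party code writing to the stdlib logger then
+// flows through this logger line by line.
+//
+//	w := logger.Writer()
+//	defer w.Close()
+//	stdlog.SetOutput(w)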
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/.gitignore b/Godeps/_workspace/src/github.com/abbot/go-http-auth/.gitignore
new file mode 100644
index 0000000..112ea39
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/.gitignore
@@ -0,0 +1,5 @@
+*~
+*.a
+*.6
+*.out
+_testmain.go
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/LICENSE b/Godeps/_workspace/src/github.com/abbot/go-http-auth/LICENSE
new file mode 100644
index 0000000..e454a52
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/LICENSE
@@ -0,0 +1,178 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/Makefile b/Godeps/_workspace/src/github.com/abbot/go-http-auth/Makefile
new file mode 100644
index 0000000..25f208d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/Makefile
@@ -0,0 +1,12 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=auth_digest
+GOFILES=\
+ auth.go\
+ digest.go\
+ basic.go\
+ misc.go\
+ md5crypt.go\
+ users.go\
+
+include $(GOROOT)/src/Make.pkg
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/README.md b/Godeps/_workspace/src/github.com/abbot/go-http-auth/README.md
new file mode 100644
index 0000000..8a26f10
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/README.md
@@ -0,0 +1,70 @@
+HTTP Authentication implementation in Go
+========================================
+
+This is an implementation of HTTP Basic and HTTP Digest authentication
+in the Go language. It is designed as a simple wrapper for
+http.HandlerFunc-style request handlers.
+
+Features
+--------
+
+ * Supports HTTP Basic and HTTP Digest authentication.
+ * Supports htpasswd and htdigest formatted files.
+ * Automatic reloading of password files.
+ * Pluggable interface for user/password storage.
+ * Supports MD5 and SHA1 for Basic authentication password storage.
+ * Configurable Digest nonce cache size with expiration.
+ * Wrapper for legacy http handlers (http.HandlerFunc interface)
+
+Example usage
+-------------
+
+This is a complete working example for Basic auth:
+
+ package main
+
+ import (
+ auth "github.com/abbot/go-http-auth"
+ "fmt"
+ "net/http"
+ )
+
+ func Secret(user, realm string) string {
+ if user == "john" {
+ // password is "hello"
+ return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1"
+ }
+ return ""
+ }
+
+ func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
+ fmt.Fprintf(w, "Hello, %s!", r.Username)
+ }
+
+ func main() {
+ authenticator := auth.NewBasicAuthenticator("example.com", Secret)
+ http.HandleFunc("/", authenticator.Wrap(handle))
+ http.ListenAndServe(":8080", nil)
+ }
+
+See more examples in the "examples" directory.
+
+Legal
+-----
+
+This module is developed under the Apache 2.0 license and can be used
+in open and proprietary projects.
+
+Copyright 2012-2013 Lev Shamardin
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file or any other part of this project except in
+compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License.
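The README's example uses an inline secret; as a hedged sketch of the file-backed variant it advertises (the htpasswd path is a placeholder), the same flow with HtpasswdFileProvider, defined later in users.go, also gives the automatic reloading mentioned in the feature list:

    package main

    import (
        "fmt"
        "net/http"

        auth "github.com/abbot/go-http-auth"
    )

    func main() {
        // Secrets come from an htpasswd-formatted file and are reloaded
        // whenever the file's modification time changes.
        secrets := auth.HtpasswdFileProvider("/path/to/my.htpasswd")
        authenticator := auth.NewBasicAuthenticator("example.com", secrets)
        http.HandleFunc("/", authenticator.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
            fmt.Fprintf(w, "Hello, %s!", r.Username)
        }))
        http.ListenAndServe(":8080", nil)
    }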
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/auth.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/auth.go
new file mode 100644
index 0000000..c4eb563
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/auth.go
@@ -0,0 +1,48 @@
+package auth
+
+import "net/http"
+
+/*
+ Request handlers must take AuthenticatedRequest instead of http.Request
+*/
+type AuthenticatedRequest struct {
+ http.Request
+ /*
+ Authenticated user name. Current API implies that Username is
+ never empty, which means that authentication is always done
+ before calling the request handler.
+ */
+ Username string
+}
+
+/*
+ AuthenticatedHandlerFunc is like http.HandlerFunc, but takes
+ AuthenticatedRequest instead of http.Request
+*/
+type AuthenticatedHandlerFunc func(http.ResponseWriter, *AuthenticatedRequest)
+
+/*
+ Authenticator wraps an AuthenticatedHandlerFunc with
+ authentication-checking code.
+
+ Typical Authenticator usage is something like:
+
+ authenticator := SomeAuthenticator(...)
+ http.HandleFunc("/", authenticator(my_handler))
+
+ Authenticator wrapper checks the user authentication and calls the
+ wrapped function only after authentication has succeeded. Otherwise,
+ it returns a handler which initiates the authentication procedure.
+*/
+type Authenticator func(AuthenticatedHandlerFunc) http.HandlerFunc
+
+type AuthenticatorInterface interface {
+ Wrap(AuthenticatedHandlerFunc) http.HandlerFunc
+}
+
+func JustCheck(auth AuthenticatorInterface, wrapped http.HandlerFunc) http.HandlerFunc {
+ return auth.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {
+ ar.Header.Set("X-Authenticated-Username", ar.Username)
+ wrapped(w, &ar.Request)
+ })
+}
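JustCheck is the escape hatch for handlers that know nothing about this package. A minimal sketch, reusing the realm and hashed secret from the README example above:

    package main

    import (
        "fmt"
        "net/http"

        auth "github.com/abbot/go-http-auth"
    )

    func secret(user, realm string) string {
        if user == "john" {
            return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1" // password is "hello"
        }
        return ""
    }

    // legacy is an ordinary http.HandlerFunc, unaware of authentication.
    func legacy(w http.ResponseWriter, r *http.Request) {
        // JustCheck passes the user along in this injected header.
        fmt.Fprintf(w, "authenticated as %s", r.Header.Get("X-Authenticated-Username"))
    }

    func main() {
        authenticator := auth.NewBasicAuthenticator("example.com", secret)
        http.HandleFunc("/", auth.JustCheck(authenticator, legacy))
        http.ListenAndServe(":8080", nil)
    }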
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/basic.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/basic.go
new file mode 100644
index 0000000..b705c83
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/basic.go
@@ -0,0 +1,88 @@
+package auth
+
+import (
+ "crypto/sha1"
+ "encoding/base64"
+ "net/http"
+ "strings"
+)
+
+type BasicAuth struct {
+ Realm string
+ Secrets SecretProvider
+}
+
+/*
+ Checks the username/password combination from the request. Returns
+ either an empty string (authentication failed) or the name of the
+ authenticated user.
+
+ Supports MD5 and SHA1 password entries
+*/
+func (a *BasicAuth) CheckAuth(r *http.Request) string {
+ s := strings.SplitN(r.Header.Get("Authorization"), " ", 2)
+ if len(s) != 2 || s[0] != "Basic" {
+ return ""
+ }
+
+ b, err := base64.StdEncoding.DecodeString(s[1])
+ if err != nil {
+ return ""
+ }
+ pair := strings.SplitN(string(b), ":", 2)
+ if len(pair) != 2 {
+ return ""
+ }
+ passwd := a.Secrets(pair[0], a.Realm)
+ if passwd == "" {
+ return ""
+ }
+ if strings.HasPrefix(passwd, "{SHA}") {
+ d := sha1.New()
+ d.Write([]byte(pair[1]))
+ if passwd[5:] != base64.StdEncoding.EncodeToString(d.Sum(nil)) {
+ return ""
+ }
+ } else {
+ e := NewMD5Entry(passwd)
+ if e == nil {
+ return ""
+ }
+ if passwd != string(MD5Crypt([]byte(pair[1]), e.Salt, e.Magic)) {
+ return ""
+ }
+ }
+ return pair[0]
+}
+
+/*
+ http.Handler for BasicAuth which initiates the authentication process
+ (or requires reauthentication).
+*/
+func (a *BasicAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("WWW-Authenticate", `Basic realm="`+a.Realm+`"`)
+ w.WriteHeader(401)
+ w.Write([]byte("401 Unauthorized\n"))
+}
+
+/*
+ Wrap returns an http.HandlerFunc which wraps an
+ AuthenticatedHandlerFunc. The returned handler checks the
+ authentication and either sends back the required authentication
+ headers, or calls the wrapped function with the authenticated
+ username in the AuthenticatedRequest.
+*/
+func (a *BasicAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if username := a.CheckAuth(r); username == "" {
+ a.RequireAuth(w, r)
+ } else {
+ ar := &AuthenticatedRequest{Request: *r, Username: username}
+ wrapped(w, ar)
+ }
+ }
+}
+
+func NewBasicAuthenticator(realm string, secrets SecretProvider) *BasicAuth {
+ return &BasicAuth{Realm: realm, Secrets: secrets}
+}
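CheckAuth above accepts {SHA} entries as the base64 encoding of the raw SHA-1 digest of the password. A short sketch of producing such an entry (the user name and password are illustrative):

    package main

    import (
        "crypto/sha1"
        "encoding/base64"
        "fmt"
    )

    // shaEntry builds the htpasswd-style {SHA} value that
    // BasicAuth.CheckAuth verifies.
    func shaEntry(password string) string {
        sum := sha1.Sum([]byte(password))
        return "{SHA}" + base64.StdEncoding.EncodeToString(sum[:])
    }

    func main() {
        fmt.Printf("test:%s\n", shaEntry("hello")) // one htpasswd line
    }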
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/digest.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/digest.go
new file mode 100644
index 0000000..b3225ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/digest.go
@@ -0,0 +1,226 @@
+package auth
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+type digest_client struct {
+ nc uint64
+ last_seen int64
+}
+
+type DigestAuth struct {
+ Realm string
+ Opaque string
+ Secrets SecretProvider
+ PlainTextSecrets bool
+
+ /*
+ Approximate size of the client cache. When the actual number of
+ tracked client nonces exceeds
+ ClientCacheSize+ClientCacheTolerance, the ClientCacheTolerance*2
+ oldest entries are purged.
+ */
+ ClientCacheSize int
+ ClientCacheTolerance int
+
+ clients map[string]*digest_client
+ mutex sync.Mutex
+}
+
+type digest_cache_entry struct {
+ nonce string
+ last_seen int64
+}
+
+type digest_cache []digest_cache_entry
+
+func (c digest_cache) Less(i, j int) bool {
+ return c[i].last_seen < c[j].last_seen
+}
+
+func (c digest_cache) Len() int {
+ return len(c)
+}
+
+func (c digest_cache) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
+
+/*
+ Purge removes the count oldest entries from DigestAuth.clients.
+*/
+func (a *DigestAuth) Purge(count int) {
+ entries := make([]digest_cache_entry, 0, len(a.clients))
+ for nonce, client := range a.clients {
+ entries = append(entries, digest_cache_entry{nonce, client.last_seen})
+ }
+ cache := digest_cache(entries)
+ sort.Sort(cache)
+ for _, client := range cache[:count] {
+ delete(a.clients, client.nonce)
+ }
+}
+
+/*
+ http.Handler for DigestAuth which initiates the authentication process
+ (or requires reauthentication).
+*/
+func (a *DigestAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
+ // Guard the clients map; CheckAuth takes the same mutex.
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {
+ a.Purge(a.ClientCacheTolerance * 2)
+ }
+ nonce := RandomKey()
+ a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}
+ w.Header().Set("WWW-Authenticate",
+ fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`,
+ a.Realm, nonce, a.Opaque))
+ w.WriteHeader(401)
+ w.Write([]byte("401 Unauthorized\n"))
+}
+
+/*
+ Parse the Authorization header from the http.Request. Returns a map
+ of auth parameters, or nil if the header is not a parsable Digest
+ auth header.
+*/
+func DigestAuthParams(r *http.Request) map[string]string {
+ s := strings.SplitN(r.Header.Get("Authorization"), " ", 2)
+ if len(s) != 2 || s[0] != "Digest" {
+ return nil
+ }
+
+ result := map[string]string{}
+ for _, kv := range strings.Split(s[1], ",") {
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) != 2 {
+ continue
+ }
+ result[strings.Trim(parts[0], "\" ")] = strings.Trim(parts[1], "\" ")
+ }
+ return result
+}
+
+/*
+ Check if the request contains valid authentication data. Returns a
+ pair (username, authinfo), where username is the name of the
+ authenticated user or an empty string, and authinfo is the content
+ for the optional Authentication-Info response header.
+*/
+func (da *DigestAuth) CheckAuth(r *http.Request) (username string, authinfo *string) {
+ da.mutex.Lock()
+ defer da.mutex.Unlock()
+ username = ""
+ authinfo = nil
+ auth := DigestAuthParams(r)
+ if auth == nil || da.Opaque != auth["opaque"] || auth["algorithm"] != "MD5" || auth["qop"] != "auth" {
+ return
+ }
+
+ // Check if the requested URI matches auth header
+ switch u, err := url.Parse(auth["uri"]); {
+ case err != nil:
+ return
+ case r.URL == nil:
+ return
+ case len(u.Path) > len(r.URL.Path):
+ return
+ case !strings.HasPrefix(r.URL.Path, u.Path):
+ return
+ }
+
+ HA1 := da.Secrets(auth["username"], da.Realm)
+ if da.PlainTextSecrets {
+ HA1 = H(auth["username"] + ":" + da.Realm + ":" + HA1)
+ }
+ HA2 := H(r.Method + ":" + auth["uri"])
+ KD := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], HA2}, ":"))
+
+ if KD != auth["response"] {
+ return
+ }
+
+ // At this point crypto checks are completed and validated.
+ // Now check if the session is valid.
+
+ nc, err := strconv.ParseUint(auth["nc"], 16, 64)
+ if err != nil {
+ return
+ }
+
+ if client, ok := da.clients[auth["nonce"]]; !ok {
+ return
+ } else {
+ if client.nc != 0 && client.nc >= nc {
+ return
+ }
+ client.nc = nc
+ client.last_seen = time.Now().UnixNano()
+ }
+
+ resp_HA2 := H(":" + auth["uri"])
+ rspauth := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], resp_HA2}, ":"))
+
+ info := fmt.Sprintf(`qop="auth", rspauth="%s", cnonce="%s", nc="%s"`, rspauth, auth["cnonce"], auth["nc"])
+ return auth["username"], &info
+}
+
+/*
+ Default values for ClientCacheSize and ClientCacheTolerance for DigestAuth
+*/
+const DefaultClientCacheSize = 1000
+const DefaultClientCacheTolerance = 100
+
+/*
+ Wrap returns an http.HandlerFunc which performs HTTP Digest
+ authentication using the DigestAuth's Realm and Secrets. The
+ SecretProvider must return HA1 digests for that realm. On success
+ the wrapped AuthenticatedHandlerFunc is called with the
+ authenticated username.
+*/
+func (a *DigestAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if username, authinfo := a.CheckAuth(r); username == "" {
+ a.RequireAuth(w, r)
+ } else {
+ ar := &AuthenticatedRequest{Request: *r, Username: username}
+ if authinfo != nil {
+ w.Header().Set("Authentication-Info", *authinfo)
+ }
+ wrapped(w, ar)
+ }
+ }
+}
+
+/*
+ JustCheck returns a function which converts an http.HandlerFunc into
+ an http.HandlerFunc that requires authentication. The username is
+ passed in an extra X-Authenticated-Username header.
+*/
+func (a *DigestAuth) JustCheck(wrapped http.HandlerFunc) http.HandlerFunc {
+ return a.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {
+ ar.Header.Set("X-Authenticated-Username", ar.Username)
+ wrapped(w, &ar.Request)
+ })
+}
+
+func NewDigestAuthenticator(realm string, secrets SecretProvider) *DigestAuth {
+ da := &DigestAuth{
+ Opaque: RandomKey(),
+ Realm: realm,
+ Secrets: secrets,
+ PlainTextSecrets: false,
+ ClientCacheSize: DefaultClientCacheSize,
+ ClientCacheTolerance: DefaultClientCacheTolerance,
+ clients: map[string]*digest_client{}}
+ return da
+}
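Putting the pieces together, a hedged sketch of a Digest-protected server whose HA1 values come from an htdigest file (the path is a placeholder; the realm must exist in that file):

    package main

    import (
        "fmt"
        "net/http"

        auth "github.com/abbot/go-http-auth"
    )

    func main() {
        secrets := auth.HtdigestFileProvider("/path/to/test.htdigest")
        authenticator := auth.NewDigestAuthenticator("example.com", secrets)
        http.HandleFunc("/", authenticator.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
            fmt.Fprintf(w, "Hello, %s!", r.Username)
        }))
        http.ListenAndServe(":8080", nil)
    }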
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/basic.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/basic.go
new file mode 100644
index 0000000..49d3989
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/basic.go
@@ -0,0 +1,35 @@
+// +build ignore
+
+/*
+ Example application using Basic auth
+
+ Build with:
+
+ go build basic.go
+*/
+
+package main
+
+import (
+ auth ".."
+ "fmt"
+ "net/http"
+)
+
+func Secret(user, realm string) string {
+ if user == "john" {
+ // password is "hello"
+ return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1"
+ }
+ return ""
+}
+
+func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
+ fmt.Fprintf(w, "Hello, %s!", r.Username)
+}
+
+func main() {
+ authenticator := auth.NewBasicAuthenticator("example.com", Secret)
+ http.HandleFunc("/", authenticator.Wrap(handle))
+ http.ListenAndServe(":8080", nil)
+}
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/digest.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/digest.go
new file mode 100644
index 0000000..3859893
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/digest.go
@@ -0,0 +1,35 @@
+// +build ignore
+
+/*
+ Example application using Digest auth
+
+ Build with:
+
+ go build digest.go
+*/
+
+package main
+
+import (
+ auth ".."
+ "fmt"
+ "net/http"
+)
+
+func Secret(user, realm string) string {
+ if user == "john" {
+ // password is "hello"
+ return "b98e16cbc3d01734b264adba7baa3bf9"
+ }
+ return ""
+}
+
+func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
+ fmt.Fprintf(w, "Hello, %s!", r.Username)
+}
+
+func main() {
+ authenticator := auth.NewDigestAuthenticator("example.com", Secret)
+ http.HandleFunc("/", authenticator.Wrap(handle))
+ http.ListenAndServe(":8080", nil)
+}
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/wrapped.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/wrapped.go
new file mode 100644
index 0000000..aa95ec3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/examples/wrapped.go
@@ -0,0 +1,36 @@
+// +build ignore
+
+/*
+ Example demonstrating how to wrap an application which is unaware of
+ authenticated requests with a "pass-through" authentication
+
+ Build with:
+
+ go build wrapped.go
+*/
+
+package main
+
+import (
+ auth ".."
+ "fmt"
+ "net/http"
+)
+
+func Secret(user, realm string) string {
+ if user == "john" {
+ // password is "hello"
+ return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1"
+ }
+ return ""
+}
+
+func regular_handler(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "This application is unaware of authentication")
+}
+
+func main() {
+ authenticator := auth.NewBasicAuthenticator("example.com", Secret)
+ http.HandleFunc("/", auth.JustCheck(authenticator, regular_handler))
+ http.ListenAndServe(":8080", nil)
+}
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/md5crypt.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/md5crypt.go
new file mode 100644
index 0000000..a7a031c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/md5crypt.go
@@ -0,0 +1,92 @@
+package auth
+
+import "crypto/md5"
+import "strings"
+
+const itoa64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
+var md5_crypt_swaps = [16]int{12, 6, 0, 13, 7, 1, 14, 8, 2, 15, 9, 3, 5, 10, 4, 11}
+
+type MD5Entry struct {
+ Magic, Salt, Hash []byte
+}
+
+func NewMD5Entry(e string) *MD5Entry {
+ parts := strings.SplitN(e, "$", 4)
+ if len(parts) != 4 {
+ return nil
+ }
+ return &MD5Entry{
+ Magic: []byte("$" + parts[1] + "$"),
+ Salt: []byte(parts[2]),
+ Hash: []byte(parts[3]),
+ }
+}
+
+/*
+ MD5 password crypt implementation
+*/
+func MD5Crypt(password, salt, magic []byte) []byte {
+ d := md5.New()
+
+ d.Write(password)
+ d.Write(magic)
+ d.Write(salt)
+
+ d2 := md5.New()
+ d2.Write(password)
+ d2.Write(salt)
+ d2.Write(password)
+
+ for i, mixin := 0, d2.Sum(nil); i < len(password); i++ {
+ d.Write([]byte{mixin[i%16]})
+ }
+
+ for i := len(password); i != 0; i >>= 1 {
+ if i&1 == 0 {
+ d.Write([]byte{password[0]})
+ } else {
+ d.Write([]byte{0})
+ }
+ }
+
+ final := d.Sum(nil)
+
+ for i := 0; i < 1000; i++ {
+ d2 := md5.New()
+ if i&1 == 0 {
+ d2.Write(final)
+ } else {
+ d2.Write(password)
+ }
+
+ if i%3 != 0 {
+ d2.Write(salt)
+ }
+
+ if i%7 != 0 {
+ d2.Write(password)
+ }
+
+ if i&1 == 0 {
+ d2.Write(password)
+ } else {
+ d2.Write(final)
+ }
+ final = d2.Sum(nil)
+ }
+
+ result := make([]byte, 0, 22)
+ v := uint(0)
+ bits := uint(0)
+ for _, i := range md5_crypt_swaps {
+ v |= (uint(final[i]) << bits)
+ for bits = bits + 8; bits > 6; bits -= 6 {
+ result = append(result, itoa64[v&0x3f])
+ v >>= 6
+ }
+ }
+ result = append(result, itoa64[v&0x3f])
+
+ return append(append(append(magic, salt...), '$'), result...)
+}
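The round-trip that BasicAuth.CheckAuth performs with this code can be exercised directly; a small sketch using the hash from the README (password "hello"):

    package main

    import (
        "fmt"

        auth "github.com/abbot/go-http-auth"
    )

    func main() {
        const entry = "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1"
        // NewMD5Entry splits the entry into magic ("$1$"), salt and hash.
        e := auth.NewMD5Entry(entry)
        recomputed := string(auth.MD5Crypt([]byte("hello"), e.Salt, e.Magic))
        fmt.Println(recomputed == entry) // true
    }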
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/misc.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/misc.go
new file mode 100644
index 0000000..277a685
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/misc.go
@@ -0,0 +1,30 @@
+package auth
+
+import "encoding/base64"
+import "crypto/md5"
+import "crypto/rand"
+import "fmt"
+
+/*
+ Return a 16-character base64-encoded string built from 12 random bytes
+*/
+func RandomKey() string {
+ k := make([]byte, 12)
+ for bytes := 0; bytes < len(k); {
+ n, err := rand.Read(k[bytes:])
+ if err != nil {
+ panic("rand.Read() failed")
+ }
+ bytes += n
+ }
+ return base64.StdEncoding.EncodeToString(k)
+}
+
+/*
+ H function for MD5 algorithm (returns a lower-case hex MD5 digest)
+*/
+func H(data string) string {
+ digest := md5.New()
+ digest.Write([]byte(data))
+ return fmt.Sprintf("%x", digest.Sum(nil))
+}
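H is the H() function from RFC 2617, and the HA1 values stored in htdigest files are just H(user:realm:password); a one-line sketch (the password here is a placeholder):

    package main

    import (
        "fmt"

        auth "github.com/abbot/go-http-auth"
    )

    func main() {
        // An htdigest line has the form "user:realm:HA1".
        ha1 := auth.H("test:example.com:some-password")
        fmt.Printf("test:example.com:%s\n", ha1)
    }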
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/test.htdigest b/Godeps/_workspace/src/github.com/abbot/go-http-auth/test.htdigest
new file mode 100644
index 0000000..6c8c75b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/test.htdigest
@@ -0,0 +1 @@
+test:example.com:aa78524fceb0e50fd8ca96dd818b8cf9
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/test.htpasswd b/Godeps/_workspace/src/github.com/abbot/go-http-auth/test.htpasswd
new file mode 100644
index 0000000..7b06989
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/test.htpasswd
@@ -0,0 +1,2 @@
+test:{SHA}qvTGHdzF6KLavt4PO0gs2a6pQ00=
+test2:$apr1$a0j62R97$mYqFkloXH0/UOaUnAiV2b0
diff --git a/Godeps/_workspace/src/github.com/abbot/go-http-auth/users.go b/Godeps/_workspace/src/github.com/abbot/go-http-auth/users.go
new file mode 100644
index 0000000..5e7d0b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/abbot/go-http-auth/users.go
@@ -0,0 +1,136 @@
+package auth
+
+import "encoding/csv"
+import "os"
+
+/*
+ SecretProvider is used by authenticators. It takes a user name and
+ realm as arguments and returns the secret required for authentication
+ (an HA1 digest for digest authentication, a properly encrypted
+ password for basic authentication).
+*/
+type SecretProvider func(user, realm string) string
+
+/*
+ Common functions for file auto-reloading
+*/
+type File struct {
+ Path string
+ Info os.FileInfo
+ /* must be set by embedding types during initialization */
+ Reload func()
+}
+
+func (f *File) ReloadIfNeeded() {
+ info, err := os.Stat(f.Path)
+ if err != nil {
+ panic(err)
+ }
+ if f.Info == nil || f.Info.ModTime() != info.ModTime() {
+ f.Info = info
+ f.Reload()
+ }
+}
+
+/*
+ Structure used for htdigest file authentication. Users maps realms to
+ maps of users to their HA1 digests.
+*/
+type HtdigestFile struct {
+ File
+ Users map[string]map[string]string
+}
+
+func reload_htdigest(hf *HtdigestFile) {
+ r, err := os.Open(hf.Path)
+ if err != nil {
+ panic(err)
+ }
+ csv_reader := csv.NewReader(r)
+ csv_reader.Comma = ':'
+ csv_reader.Comment = '#'
+ csv_reader.TrimLeadingSpace = true
+
+ records, err := csv_reader.ReadAll()
+ if err != nil {
+ panic(err)
+ }
+
+ hf.Users = make(map[string]map[string]string)
+ for _, record := range records {
+ _, exists := hf.Users[record[1]]
+ if !exists {
+ hf.Users[record[1]] = make(map[string]string)
+ }
+ hf.Users[record[1]][record[0]] = record[2]
+ }
+}
+
+/*
+ SecretProvider implementation based on htdigest-formatted files. Will
+ reload the htdigest file on changes. Will panic on syntax errors in
+ htdigest files.
+*/
+func HtdigestFileProvider(filename string) SecretProvider {
+ hf := &HtdigestFile{File: File{Path: filename}}
+ hf.Reload = func() { reload_htdigest(hf) }
+ return func(user, realm string) string {
+ hf.ReloadIfNeeded()
+ _, exists := hf.Users[realm]
+ if !exists {
+ return ""
+ }
+ digest, exists := hf.Users[realm][user]
+ if !exists {
+ return ""
+ }
+ return digest
+ }
+}
+
+/*
+ Structure used for htpasswd file authentication. Users maps user
+ names to their salted, encrypted passwords.
+*/
+type HtpasswdFile struct {
+ File
+ Users map[string]string
+}
+
+func reload_htpasswd(h *HtpasswdFile) {
+ r, err := os.Open(h.Path)
+ if err != nil {
+ panic(err)
+ }
+ csv_reader := csv.NewReader(r)
+ csv_reader.Comma = ':'
+ csv_reader.Comment = '#'
+ csv_reader.TrimLeadingSpace = true
+
+ records, err := csv_reader.ReadAll()
+ if err != nil {
+ panic(err)
+ }
+
+ h.Users = make(map[string]string)
+ for _, record := range records {
+ h.Users[record[0]] = record[1]
+ }
+}
+
+/*
+ SecretProvider implementation based on htpasswd-formatted files. Will
+ reload the htpasswd file on changes. Will panic on syntax errors in
+ htpasswd files. The realm argument of the SecretProvider is ignored.
+*/
+func HtpasswdFileProvider(filename string) SecretProvider {
+ h := &HtpasswdFile{File: File{Path: filename}}
+ h.Reload = func() { reload_htpasswd(h) }
+ return func(user, realm string) string {
+ h.ReloadIfNeeded()
+ password, exists := h.Users[user]
+ if !exists {
+ return ""
+ }
+ return password
+ }
+}
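Because SecretProvider is a plain function type, storage backends other than files plug in with no extra machinery. A hedged in-package sketch of an in-memory provider (the map contents and helper name are illustrative):

    // mapProvider adapts an in-memory user -> encrypted-password map to
    // the SecretProvider interface; like htpasswd, the realm is ignored.
    func mapProvider(users map[string]string) SecretProvider {
        return func(user, realm string) string {
            return users[user] // "" when absent, which fails authentication
        }
    }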
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 0000000..5f14d11
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000..05a179c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,124 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with code, message,
+// and original errors. Calling Error() will only return the error that is at the end
+// of the list.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original errors if any were set. An empty slice is
+ // returned if none were set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// origErr, if non-nil, is stored as the new Error's original error.
+func New(code, message string, origErr error) Error {
+ return newBaseError(code, message, origErr)
+}
+
+// NewBatchError returns a BatchError wrapping the given slice of errors.
+func NewBatchError(code, message string, errs []error) BatchError {
+ return newBaseErrors(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service, such as on a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed:", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available, such as when the request
+ // failed due to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
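A hedged sketch of how the constructors and accessors above compose (the code, message, and request ID are made up):

    package main

    import (
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    func main() {
        cause := errors.New("connection reset")
        err := awserr.New("RequestError", "send request failed", cause)

        fmt.Println(err.Code())    // RequestError
        fmt.Println(err.Message()) // send request failed
        fmt.Println(err.OrigErr()) // connection reset

        // Adding HTTP metadata produces a RequestFailure.
        reqErr := awserr.NewRequestFailure(err, 500, "req-1234")
        fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) // 500 req-1234
    }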
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 0000000..605f73c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,197 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If provided, each contributes its
+// own line to the message; if omitted, its line is left out.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error
+// interface, and for any error which does not fit a specific wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and err.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the error.
+//
+// origErr is the error object which will be nested under the new error to be returned.
+func newBaseError(code, message string, origErr error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ }
+
+ if origErr != nil {
+ b.errs = append(b.errs, origErr)
+ }
+
+ return b
+}
+
+// newBaseErrors returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the error.
+//
+// origErrs are the error objects which will be nested under the new error to be returned.
+func newBaseErrors(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no error
+// was set. This only returns the first element in the list. If the full list is
+// needed, use BatchError.
+func (b baseError) OrigErr() error {
+ if size := len(b.errs); size > 0 {
+ return b.errs[0]
+ }
+
+ return nil
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+}
+
+// newRequestError returns a wrapped error with additional information for the
+// request status code and service requestID.
+//
+// Should be used to wrap all errors which involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// An error list that satisfies the error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+ // How do we want to handle the array size being zero
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+ msg += fmt.Sprintf("%s", e[i].Error())
+ // We check the next index to see if it is within the slice.
+ // If it is, then we append a newline. We do this because unit tests
+ // could be broken by the additional '\n'.
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
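The layout SprintError produces is easiest to see on a concrete call; a small sketch (all strings are made up):

    package main

    import (
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    func main() {
        msg := awserr.SprintError("Throttling", "rate exceeded",
            "status code: 400", errors.New("too many requests"))
        fmt.Println(msg)
        // Output (the extra line is tab-indented):
        // Throttling: rate exceeded
        //	status code: 400
        // caused by: too many requests
    }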
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 0000000..8429470
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,100 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ dst.Set(reflect.New(e))
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If it's not assignable, the value would
+ // need to be converted, and the impact of that may be unexpected or
+ // incompatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
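A hedged sketch of the deep-copy semantics (the Params type is illustrative, not an SDK type):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    type Params struct {
        Name *string
        Tags []string
    }

    func main() {
        name := "original"
        src := &Params{Name: &name, Tags: []string{"a", "b"}}

        // CopyOf allocates a fresh Params and deep-copies src into it,
        // so mutating the copy leaves src untouched.
        dst := awsutil.CopyOf(src).(*Params)
        *dst.Name = "copy"
        dst.Tags[0] = "z"

        fmt.Println(*src.Name, src.Tags) // original [a b]
    }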
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 0000000..59fa4a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil and of the same type they are equal;
+ // if they are of different types they are not equal.
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
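The single level of indirection is the whole point of this wrapper; a short sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    func main() {
        s := "us-east-1"

        // A *string and a string with the same contents compare equal.
        fmt.Println(awsutil.DeepEqual(&s, "us-east-1")) // true

        // Two nil pointers of the same type are also equal.
        var a, b *string
        fmt.Println(awsutil.DeepEqual(a, b)) // true
    }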
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 0000000..4d2a01e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, value := range values {
+ value := reflect.Indirect(value)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ if dstVal.CanAddr() {
+ // Convert to pointer so that pointer's value can be nil'ed
+ // dstVal = dstVal.Addr()
+ }
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+ }
+
+}
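A hedged sketch of the createPath behaviour in SetValueAtPath (the Outer/Inner types are illustrative): nil pointers along the path are allocated, and the field match is case insensitive.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    type Inner struct{ Value *string }
    type Outer struct{ Inner *Inner }

    func main() {
        o := &Outer{}

        // Walks the case-insensitive path, allocating the nil *Inner
        // and *string on the way (createPath is true for writes).
        awsutil.SetValueAtPath(o, "inner.value", "hello")
        fmt.Println(*o.Inner.Value) // hello
    }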
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 0000000..0de3eaa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,103 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
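A hedged sketch of the output shape (the Bucket type is illustrative):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    type Bucket struct {
        Name    *string
        Private *bool
        Tags    []string
    }

    func main() {
        name, private := "logs", true
        b := &Bucket{Name: &name, Private: &private, Tags: []string{"a"}}

        // Pointers are followed, unset fields are skipped, and strings
        // are quoted:
        //
        //   {
        //     Name: "logs",
        //     Private: true,
        //     Tags: ["a"]
        //   }
        fmt.Println(awsutil.Prettify(b))
    }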
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000..b6432f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ stringValue(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 0000000..c8d0564
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,120 @@
+package client
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ Endpoint, SigningRegion string
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers,
+ }
+
+ switch retryer, ok := cfg.Retryer.(request.Retryer); {
+ case ok:
+ svc.Retryer = retryer
+ case cfg.Retryer != nil && cfg.Logger != nil:
+ s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+ cfg.Logger.Log(s)
+ fallthrough
+ default:
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = 3
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+ }
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFront(logRequest)
+ c.Handlers.Send.PushBack(logResponse)
+}
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+
+ if logBody {
+ // Reset the request body because DumpRequestOut will re-wrap the
+ // r.HTTPRequest's Body as a NopCloser, and it will not be reset after
+ // being read by the HTTP client reader.
+ r.Body.Seek(r.BodyStart, 0)
+ r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+ var msg = "no response data"
+ if r.HTTPResponse != nil {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
+ msg = string(dumpedBody)
+ } else if r.Error != nil {
+ msg = r.Error.Error()
+ }
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+}
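
A hedged sketch of how the debug handlers above get activated: AddDebugHandlers only attaches logRequest/logResponse when the configured log level is at least LogDebug. This assumes the SDK's session package and default logger, both outside this diff:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// LogDebug attaches the logRequest/logResponse handlers above;
	// LogDebugWithHTTPBody additionally dumps the request and response bodies.
	cfg := aws.NewConfig().
		WithLogLevel(aws.LogDebugWithHTTPBody).
		WithLogger(aws.NewDefaultLogger()) // writes to stdout by default

	// Any service client constructed from this session logs its wire traffic.
	_ = session.New(cfg)
}
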
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 0000000..24d39ce
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,45 @@
+package client
+
+import (
+ "math"
+ "math/rand"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+// type retryer struct {
+// service.DefaultRetryer
+// }
+//
+// // This implementation always has 100 max retries
+// func (d retryer) MaxRetries() uint { return 100 }
+type DefaultRetryer struct {
+ NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use to make
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+ delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30)
+ return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+ if r.HTTPResponse.StatusCode >= 500 {
+ return true
+ }
+ return r.IsErrorRetryable()
+}
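
To make the backoff curve concrete, here is a standalone sketch mirroring RetryRules above: the delay is 2^retryCount times a random 30-59ms base, so retries land at roughly 30-60ms, 60-120ms, 120-240ms, and so on:

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// delayFor mirrors DefaultRetryer.RetryRules: 2^retryCount times a random
// 30-59ms base.
func delayFor(retryCount int) time.Duration {
	delay := int(math.Pow(2, float64(retryCount))) * (rand.Intn(30) + 30)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	for n := 0; n < 4; n++ {
		fmt.Printf("retry %d: %v\n", n, delayFor(n))
	}
}
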
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 0000000..4778056
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 0000000..9e83e92
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,311 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own default
+// number of retries. This is also the default behavior when Config.MaxRetries
+// is nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the {defaults.DefaultConfig} structure.
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when wanting to see all errors while attempting to retrieve
+ // credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to
+ // a chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `""` to use the default generated endpoint.
+ //
+ // @note You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+ // AWS Regions and Endpoints
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service specific
+ // configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the request.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for missing
+ // required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
+ // use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // @note This configuration option is specific to the Amazon S3 service.
+ // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to prevent the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
+ // client to create a new http.Client. This option is only meaningful if you're not
+ // already using a custom HTTP client with the SDK. Enabled by default.
+ //
+ // Must be set and provided to session.New() in order to prevent the EC2Metadata
+ // client from overriding the timeout of the default credentials chain.
+ //
+ // Example:
+ // sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
+ // svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+ SleepDelay func(time.Duration)
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder methods to
+// set multiple configuration values inline without using pointers.
+//
+// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
+//
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean,
+// returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+ c.Region = &region
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
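
A small sketch of the merge semantics above: MergeIn and Copy only copy non-nil fields, so later configs override only what they explicitly set (the region and retry values here are illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)

	// Copy starts from base and merges the override on top; only non-nil
	// fields win, so Region survives while MaxRetries is replaced.
	merged := base.Copy(aws.NewConfig().WithMaxRetries(3))

	fmt.Println(aws.StringValue(merged.Region), aws.IntValue(merged.MaxRetries)) // us-west-2 3
}
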
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 0000000..d6a7b08
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,357 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
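
These helpers exist because the AWS API models use pointers for optional fields. A brief sketch of the round trip, including the nil-safe value accessors:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Value -> pointer, for populating optional API fields.
	name := aws.String("example")
	fmt.Println(aws.StringValue(name)) // example

	// The *Value accessors are nil-safe and return the zero value.
	var missing *int64
	fmt.Println(aws.Int64Value(missing)) // 0

	// Slice helpers convert wholesale.
	ids := aws.StringSlice([]string{"a", "b"})
	fmt.Println(aws.StringValueSlice(ids)) // [a b]
}
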
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 0000000..1d3e656
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,139 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "runtime"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body length and no "Content-Length" was specified, it will panic.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ := strconv.ParseInt(slength, 10, 64)
+ r.HTTPRequest.ContentLength = length
+ return
+ }
+
+ var length int64
+ switch body := r.Body.(type) {
+ case nil:
+ length = 0
+ case lener:
+ length = int64(body.Len())
+ case io.Seeker:
+ r.BodyStart, _ = body.Seek(0, 1)
+ end, _ := body.Seek(0, 2)
+ body.Seek(r.BodyStart, 0) // make sure to seek back to original location
+ length = end - r.BodyStart
+ default:
+ panic("Cannot get length of body, must provide `ContentLength`")
+ }
+
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+}}
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
+ var err error
+ r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
+ if err != nil {
+ // Capture the case where url.Error is returned for error processing
+ // response. e.g. 301 without location header comes back as string
+ // error and r.HTTPResponse is nil. Other url redirect errors will
+ // come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+ }
+}}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+ r.Config.SleepDelay(r.RetryDelay)
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
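
The io.Seeker branch of BuildContentLengthHandler measures a body's remaining length without consuming it. A standalone sketch of that technique (remainingLen is a hypothetical helper, not part of the SDK):

package main

import (
	"fmt"
	"io"
	"strings"
)

// remainingLen mirrors the io.Seeker branch above: note the current offset,
// seek to the end to find it, then seek back so the body is left exactly
// where it started.
func remainingLen(s io.Seeker) int64 {
	start, _ := s.Seek(0, 1) // offset relative to current position
	end, _ := s.Seek(0, 2)   // offset relative to end
	s.Seek(start, 0)         // restore the original position
	return end - start
}

func main() {
	r := strings.NewReader("hello world")
	r.Seek(6, 0)                 // pretend part of the body was already consumed
	fmt.Println(remainingLen(r)) // 5
}
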
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 0000000..ea07580
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,153 @@
+package corehandlers
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if r.ParamsFilled() {
+ v := validator{errors: []string{}}
+ v.validateAny(reflect.ValueOf(r.Params), "")
+
+ if count := len(v.errors); count > 0 {
+ format := "%d validation errors:\n- %s"
+ msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
+ r.Error = awserr.New("InvalidParameter", msg, nil)
+ }
+ }
+}}
+
+// A validator validates values, collecting any validation errors that occur.
+type validator struct {
+ errors []string
+}
+
+// There's no validation to be done on the contents of []byte values. Prepare
+// to check validateAny arguments against that type so we can quickly skip
+// them.
+var byteSliceType = reflect.TypeOf([]byte(nil))
+
+// validateAny will validate any struct, slice or map type. All validations
+// are also performed recursively for nested types.
+func (v *validator) validateAny(value reflect.Value, path string) {
+ value = reflect.Indirect(value)
+ if !value.IsValid() {
+ return
+ }
+
+ switch value.Kind() {
+ case reflect.Struct:
+ v.validateStruct(value, path)
+ case reflect.Slice:
+ if value.Type() == byteSliceType {
+ // We don't need to validate the contents of []byte.
+ return
+ }
+ for i := 0; i < value.Len(); i++ {
+ v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
+ }
+ case reflect.Map:
+ for _, n := range value.MapKeys() {
+ v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
+ }
+ }
+}
+
+// validateStruct will validate the struct value's fields. If the structure has
+// nested types those types will be validated also.
+func (v *validator) validateStruct(value reflect.Value, path string) {
+ prefix := "."
+ if path == "" {
+ prefix = ""
+ }
+
+ for i := 0; i < value.Type().NumField(); i++ {
+ f := value.Type().Field(i)
+ if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
+ continue
+ }
+ fvalue := value.FieldByName(f.Name)
+
+ err := validateField(f, fvalue, validateFieldRequired, validateFieldMin)
+ if err != nil {
+ v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name))
+ continue
+ }
+
+ v.validateAny(fvalue, path+prefix+f.Name)
+ }
+}
+
+type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error
+
+func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error {
+ for _, fn := range funcs {
+ if err := fn(f, fvalue); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validates that a field has a valid value provided for required fields.
+func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error {
+ if f.Tag.Get("required") == "" {
+ return nil
+ }
+
+ switch fvalue.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map:
+ if fvalue.IsNil() {
+ return fmt.Errorf("missing required parameter")
+ }
+ default:
+ if !fvalue.IsValid() {
+ return fmt.Errorf("missing required parameter")
+ }
+ }
+ return nil
+}
+
+// Validates that if a value is provided for a field, that value must be at
+// least a minimum length.
+func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error {
+ minStr := f.Tag.Get("min")
+ if minStr == "" {
+ return nil
+ }
+ min, _ := strconv.ParseInt(minStr, 10, 64)
+
+ kind := fvalue.Kind()
+ if kind == reflect.Ptr {
+ if fvalue.IsNil() {
+ return nil
+ }
+ fvalue = fvalue.Elem()
+ }
+
+ switch fvalue.Kind() {
+ case reflect.String:
+ if int64(fvalue.Len()) < min {
+ return fmt.Errorf("field too short, minimum length %d", min)
+ }
+ case reflect.Slice, reflect.Map:
+ if fvalue.IsNil() {
+ return nil
+ }
+ if int64(fvalue.Len()) < min {
+ return fmt.Errorf("field too short, minimum length %d", min)
+ }
+
+ // TODO min can also apply to number minimum value.
+
+ }
+ return nil
+}
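
The validator type is unexported and runs via ValidateParametersHandler, so user code never calls it directly; it reads struct tags like the following (PutItemInput is a hypothetical input shape here; the real API models are generated with such tags):

package main

import "fmt"

// PutItemInput is a hypothetical input; generated SDK models carry tags
// like these, which the validator reads via reflection.
type PutItemInput struct {
	// A nil pointer with a `required` tag produces "missing required parameter".
	TableName *string `required:"true" min:"3"`

	// `min` on a string, slice, or map checks length, but only when the
	// value is non-nil.
	Keys []*string `min:"1"`
}

func main() {
	// For the zero value, only TableName's required check would fail:
	// Keys is nil, so its min check is skipped.
	fmt.Printf("%+v\n", PutItemInput{})
}
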
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 0000000..857311f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+ // ErrNoValidProvidersFoundInChain is returned when there are no valid
+ // providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true
+ //
+ // @readonly
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none, ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := NewChainCredentials(
+// []Provider{
+// &EnvProvider{},
+// &EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000..7b8ebf5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,223 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when the credentials Value has expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := NewCredentials(&EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials object can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+// // Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what it
+// means to be expired.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns the credential value if it was successfully retrieved.
+ // An error is returned if the value was not obtainable, or was empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time at which the credentials expire.
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides synchronous safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+ m sync.Mutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
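
Expanding the Custom Provider sketch from the package comment into a complete, compilable example (staticProvider and its values are hypothetical placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// staticProvider is a toy Provider: it never expires, so Credentials calls
// Retrieve once and serves the cached Value from then on.
type staticProvider struct{}

func (p *staticProvider) Retrieve() (credentials.Value, error) {
	return credentials.Value{
		AccessKeyID:     "AKID",   // placeholder
		SecretAccessKey: "SECRET", // placeholder
		ProviderName:    "staticProvider",
	}, nil
}

func (p *staticProvider) IsExpired() bool { return false }

func main() {
	creds := credentials.NewCredentials(&staticProvider{})
	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.ProviderName, v.AccessKeyID)
}
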
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 0000000..aa9d689
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,178 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http.Client,
+// Endpoint, or ExpiryWindow:
+//
+// p := &ec2rolecreds.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{
+// HTTPClient: &http.Client{Timeout: 10 * time.Second},
+// }),
+//
+// // Do not use early expiry of credentials. If a non zero value is
+// // specified the credentials will be expired early
+// ExpiryWindow: 0,
+// }
+type EC2RoleProvider struct {
+ credentials.Expiry
+
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+ credsList, err := requestCredList(m.Client)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, credsName)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshalling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credentials name from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("SerializationError",
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000..96655bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,77 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ //
+ // @readonly
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ //
+ // @readonly
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
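
A quick usage sketch of the environment provider (the key values are placeholders):

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// The provider reads only these variables (plus the legacy
	// AWS_ACCESS_KEY / AWS_SECRET_KEY fallbacks). Values are placeholders.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

	v, err := credentials.NewEnvCredentials().Get()
	if err != nil {
		// ErrAccessKeyIDNotFound / ErrSecretAccessKeyNotFound when unset.
		panic(err)
	}
	fmt.Println(v.AccessKeyID, v.ProviderName) // AKID EnvProvider
}
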
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 0000000..7fc91d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000..7fb7cbf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/go-ini/ini"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+ //
+ // @readonly
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads the named profile from the shared credentials file.
+// The credentials retrieved from the profile will be returned, or an error if
+// the file cannot be read or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+ }
+
+ id, err := iniProfile.GetKey("aws_access_key_id")
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ err)
+ }
+
+ secret, err := iniProfile.GetKey("aws_secret_access_key")
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ err)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.Key("aws_session_token")
+
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if p.Filename == "" {
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
+ return p.Filename, nil
+ }
+
+ homeDir := os.Getenv("HOME") // *nix
+ if homeDir == "" { // Windows
+ homeDir = os.Getenv("USERPROFILE")
+ }
+ if homeDir == "" {
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
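+
+// Example use of the shared credentials provider (a minimal sketch; empty
+// arguments fall back to the defaults described above):
+//
+// creds := credentials.NewSharedCredentials("", "")
+// v, err := creds.Get() // reads the "default" profile from $HOME/.aws/credentials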
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 0000000..71189e7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,48 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ //
+ // @readonly
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ s.Value.ProviderName = StaticProviderName
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
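+
+// Example use of the static provider (a minimal sketch; the key values are
+// placeholders):
+//
+// creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+// v, err := creds.Get() // IsExpired is always false for static credentials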
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 0000000..4d6408b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,134 @@
+// Package stscreds are credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+package stscreds
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+ AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time. This provider must be used explicitly,
+// as it is not included in the credentials chain.
+type AssumeRoleProvider struct {
+ credentials.Expiry
+
+ // STS client to make assume role request with.
+ Client AssumeRoler
+
+ // Role to be assumed.
+ RoleARN string
+
+ // Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: sts.New(c),
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: svc,
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+ // Apply defaults where parameters are not set.
+ if p.RoleSessionName == "" {
+ // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = DefaultDuration
+ }
+
+ roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
+ DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ })
+
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // We will proactively generate new credentials before they expire.
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: *roleOutput.Credentials.AccessKeyId,
+ SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+ SessionToken: *roleOutput.Credentials.SessionToken,
+ ProviderName: ProviderName,
+ }, nil
+}
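+
+// Example use of the AssumeRole provider (a minimal sketch; the role ARN is
+// a placeholder):
+//
+// sess := session.New()
+// creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example")
+// cfg := aws.NewConfig().WithCredentials(creds)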
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 0000000..043960d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,97 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithSleepDelay(time.Sleep)
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
+
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
+ ExpiryWindow: 5 * time.Minute,
+ },
+ }})
+}
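+
+// Example of resetting a client configuration to the SDK defaults (a minimal
+// sketch):
+//
+// def := defaults.Get()
+// cfg := def.Config.WithRegion("us-west-2")
+// cfg.Credentials = defaults.CredChain(cfg, def.Handlers)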
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 0000000..e5137ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,43 @@
+package ec2metadata
+
+import (
+ "path"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request data from the EC2 instance
+// metadata service. The response content is returned as a string.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ // returns the region without the availability zone suffix, e.g. us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if the application is running within an EC2 instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
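+
+// Example (a minimal sketch; mySession is assumed to be an existing
+// session.Session, and the calls only succeed on an EC2 instance):
+//
+// svc := ec2metadata.New(mySession)
+// if svc.Available() {
+//     region, _ := svc.Region()               // e.g. "us-west-2"
+//     id, _ := svc.GetMetadata("instance-id") // raw metadata value
+// }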
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 0000000..5b4379d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+//
+// Example:
+// // Create an EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create an EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client at all,
+// is provided, the EC2Metadata client's HTTP timeout will be shortened.
+// To disable this override set Config.EC2MetadataDisableTimeoutOverride to
+// true. The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 0000000..5766361
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ //
+ // @readonly
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ //
+ // @readonly
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 0000000..db87188
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns a pointer to a LogLevelType. Should be used as a workaround
+// for not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Safe to use on nil value LogLevelTypes. If the
+// LogLevel is nil, the comparison defaults to LogOff.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Safe to use on nil value LogLevelTypes. If the LogLevel is nil, the
+// comparison defaults to LogOff.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug states that debug output should be logged by the SDK. This should
+ // be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // HTTP bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests
+ // will be retried. This should be used to see when, and how often, service
+ // requests are retried. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+)
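+
+// Example of enabling a debug sub level (a minimal sketch; each sub level
+// already carries the LogDebug bit, so no extra OR is needed):
+//
+// cfg := aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody)
+// cfg.LogLevel.Matches(aws.LogDebug) // true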
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to wrap a function taking a variadic
+// list of arguments so that it satisfies the Logger interface.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 0000000..5279c19
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,187 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic is to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration
+ // based on a condition, such as an error as in HandlerListStopOnError,
+ // or for logging as in HandlerListLogItem.
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ n.list = append([]NamedHandler{}, l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = []NamedHandler{}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.list = append(l.list, NamedHandler{"__anonymous", f})
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
+}
+
+// PushBackNamed pushes the named handler n to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ l.list = append(l.list, n)
+}
+
+// PushFrontNamed pushes the named handler n to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ l.list = append([]NamedHandler{n}, l.list...)
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ newlist := []NamedHandler{}
+ for _, m := range l.list {
+ if m.Name != n.Name {
+ newlist = append(newlist, m)
+ }
+ }
+ l.list = newlist
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
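+
+// Example of registering a custom named handler (a minimal sketch; the
+// handler name and log message are illustrative):
+//
+// handlers.Send.PushFrontNamed(NamedHandler{
+//     Name: "example.LogRequest",
+//     Fn: func(r *Request) {
+//         r.Config.Logger.Log("sending", r.Operation.Name)
+//     },
+// })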
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 0000000..4b36900
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,298 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ Time time.Time
+ ExpireTime time.Duration
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+
+ built bool
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+}
+
+// Paginator keeps track of pagination configuration for an API operation.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+ p := operation.HTTPPath
+ if p == "" {
+ p = "/"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+ httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: nil,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.HTTPRequest.Body = ioutil.NopCloser(reader)
+ r.Body = reader
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = false
+ r.Sign()
+ if r.Error != nil {
+ return "", r.Error
+ }
+ return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs them.
+// Also returns the signed headers back to the caller.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = true
+ r.Sign()
+ if r.Error != nil {
+ return "", nil, r.Error
+ }
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Error = nil
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning an error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+func (r *Request) Send() error {
+ for {
+ r.Sign()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ if aws.BoolValue(r.Retryable) {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // Re-seek the body back to the original point for a retry so that
+ // Send will send the body's contents again in the upcoming request.
+ r.Body.Seek(r.BodyStart, 0)
+ r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
+ }
+ r.Retryable = nil
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Send Request", true, err)
+ continue
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.UnmarshalError.Run(r)
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Validate Response", true, err)
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Unmarshal Response", true, err)
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 0000000..2939ec4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,104 @@
+package request
+
+import (
+ "reflect"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+//type Paginater interface {
+// HasNextPage() bool
+// NextPage() *Request
+// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
+//}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+ return len(r.nextPageTokens()) > 0
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(v) > 0 {
+ tokens = append(tokens, v[0])
+ tokenAdded = true
+ } else {
+ tokens = append(tokens, nil)
+ }
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+func (r *Request) NextPage() *Request {
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
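+
+// Example use of EachPage (a minimal sketch; ListTablesRequest and the
+// dynamodb output type follow the DynamoDB example above and are not part
+// of this package):
+//
+// req, _ := svc.ListTablesRequest(nil)
+// err := req.EachPage(func(data interface{}, lastPage bool) bool {
+//     fmt.Println(data.(*dynamodb.ListTablesOutput).TableNames)
+//     return true // returning false would stop iterating
+// })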
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 0000000..ab6fff5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,82 @@
+package request
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the service.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
+ "TooManyRequestsException": {}, // Lambda functions
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorRetryable() bool {
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ return isCodeRetryable(err.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorExpired() bool {
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ return isCodeExpiredCreds(err.Code())
+ }
+ }
+ return false
+}
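+
+// Example of installing a custom Retryer (a minimal sketch; CustomRetryer is
+// a hypothetical type implementing the Retryer interface above):
+//
+// cfg := request.WithRetryer(aws.NewConfig(), &CustomRetryer{})
+// sess := session.New(cfg)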
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 0000000..47e4536
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,120 @@
+// Package session provides a way to create service clients with shared configuration
+// and handlers.
+//
+// Generally this package should be used instead of the `defaults` package.
+//
+// A session should be used to share configurations and request handlers between multiple
+// service clients. When service clients need specific configuration aws.Config can be
+// used to provide additional configuration directly to the service client.
+package session
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to use to create service clients concurrently, but it is
+// not safe to mutate the session concurrently.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers merging in the provided Configs
+// on top of the SDK's default configurations. Once the session is created it
+// can be mutated to modify Configs or Handlers. The session is safe to be read
+// concurrently, but it should not be written to concurrently.
+//
+// Example:
+// // Create a session with the default config and request handlers.
+// sess := session.New()
+//
+// // Create a session with a custom region
+// sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//
+// // Create a session, and add additional handlers for all service
+// // clients created with the session to inherit. Adds logging handler.
+// sess := session.New()
+// sess.Handlers.Send.PushFront(func(r *request.Request) {
+// // Log every request made and its payload
+// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+// })
+//
+// // Create a S3 client instance from a session
+// sess := session.New()
+// svc := s3.New(sess)
+func New(cfgs ...*aws.Config) *Session {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Apply the passed in configs so the configuration can be applied to the
+ // default credential chain
+ cfg.MergeIn(cfgs...)
+ cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+ // Reapply any passed in configs to override credentials if set
+ cfg.MergeIn(cfgs...)
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+
+ return s
+}
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the session's copied config.
+//
+// Example:
+// // Create a copy of the current session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2"})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+//
+// Example:
+// sess := session.New()
+// s3.New(sess)
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+ endpoint, signingRegion := endpoints.NormalizeEndpoint(
+ aws.StringValue(s.Config.Endpoint), serviceName,
+ aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: endpoint,
+ SigningRegion: signingRegion,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 0000000..0f067c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,88 @@
+package aws
+
+import (
+ "io"
+ "sync"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and any error that occurred, will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written will be returned, or an error. Can
+// overwrite previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ b.m.Lock()
+ defer b.m.Unlock()
+
+ expLen := pos + int64(len(p))
+ if int64(len(b.buf)) < expLen {
+ newBuf := make([]byte, expLen)
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ copy(b.buf[pos:], p)
+ return len(p), nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf[:len(b.buf):len(b.buf)]
+}
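+
+// Example use of WriteAtBuffer (a minimal sketch; writes may arrive out of
+// order, as they do from a concurrent downloader):
+//
+// buf := &aws.WriteAtBuffer{}
+// buf.WriteAt([]byte("world"), 6)
+// buf.WriteAt([]byte("hello "), 0)
+// fmt.Println(string(buf.Bytes())) // "hello world"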
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 0000000..c15f50b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.1.3"
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
new file mode 100644
index 0000000..2b279e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
@@ -0,0 +1,65 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// NormalizeEndpoint takes an endpoint and service API information and returns a
+// normalized endpoint and signing region. If the endpoint is an empty string,
+// the service name and region will be used to look up the service's API endpoint.
+// If the endpoint is provided, the scheme will be added if it is not present.
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
+ if endpoint == "" {
+ return EndpointForRegion(serviceName, region, disableSSL)
+ }
+
+ return AddScheme(endpoint, disableSSL), ""
+}
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// If the service and region pair is not found, endpoint and signingRegion will be empty.
+func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
+ derivedKeys := []string{
+ region + "/" + svcName,
+ region + "/*",
+ "*/" + svcName,
+ "*/*",
+ }
+
+ for _, key := range derivedKeys {
+ if val, ok := endpointsMap.Endpoints[key]; ok {
+ ep := val.Endpoint
+ ep = strings.Replace(ep, "{region}", region, -1)
+ ep = strings.Replace(ep, "{service}", svcName, -1)
+
+ endpoint = ep
+ signingRegion = val.SigningRegion
+ break
+ }
+ }
+
+ return AddScheme(endpoint, disableSSL), signingRegion
+}
+
+// Regular expression to determine if the endpoint string is prefixed with a scheme.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if endpoint != "" && !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
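+
+// Example lookup against the bundled endpoint map (a minimal sketch; the
+// values shown follow the "*/iam" entry in endpoints_map.go):
+//
+// endpoint, signingRegion := endpoints.EndpointForRegion("iam", "us-west-2", false)
+// // endpoint == "https://iam.amazonaws.com", signingRegion == "us-east-1"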
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
new file mode 100644
index 0000000..0cb6917
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
@@ -0,0 +1,70 @@
+{
+ "version": 2,
+ "endpoints": {
+ "*/*": {
+ "endpoint": "{service}.{region}.amazonaws.com"
+ },
+ "cn-north-1/*": {
+ "endpoint": "{service}.{region}.amazonaws.com.cn",
+ "signatureVersion": "v4"
+ },
+ "us-gov-west-1/iam": {
+ "endpoint": "iam.us-gov.amazonaws.com"
+ },
+ "us-gov-west-1/sts": {
+ "endpoint": "sts.us-gov-west-1.amazonaws.com"
+ },
+ "us-gov-west-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "*/cloudfront": {
+ "endpoint": "cloudfront.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/cloudsearchdomain": {
+ "endpoint": "",
+ "signingRegion": "us-east-1"
+ },
+ "*/data.iot": {
+ "endpoint": "",
+ "signingRegion": "us-east-1"
+ },
+ "*/ec2metadata": {
+ "endpoint": "http://169.254.169.254/latest",
+ "signingRegion": "us-east-1"
+ },
+ "*/iam": {
+ "endpoint": "iam.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/importexport": {
+ "endpoint": "importexport.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/route53": {
+ "endpoint": "route53.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/sts": {
+ "endpoint": "sts.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/waf": {
+ "endpoint": "waf.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "us-east-1/sdb": {
+ "endpoint": "sdb.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "us-east-1/s3": {
+ "endpoint": "s3.amazonaws.com"
+ },
+ "eu-central-1/s3": {
+ "endpoint": "{service}.{region}.amazonaws.com"
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
new file mode 100644
index 0000000..6183dcd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
@@ -0,0 +1,83 @@
+package endpoints
+
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+type endpointStruct struct {
+ Version int
+ Endpoints map[string]endpointEntry
+}
+
+type endpointEntry struct {
+ Endpoint string
+ SigningRegion string
+}
+
+var endpointsMap = endpointStruct{
+ Version: 2,
+ Endpoints: map[string]endpointEntry{
+ "*/*": {
+ Endpoint: "{service}.{region}.amazonaws.com",
+ },
+ "*/cloudfront": {
+ Endpoint: "cloudfront.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/cloudsearchdomain": {
+ Endpoint: "",
+ SigningRegion: "us-east-1",
+ },
+ "*/data.iot": {
+ Endpoint: "",
+ SigningRegion: "us-east-1",
+ },
+ "*/ec2metadata": {
+ Endpoint: "http://169.254.169.254/latest",
+ SigningRegion: "us-east-1",
+ },
+ "*/iam": {
+ Endpoint: "iam.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/importexport": {
+ Endpoint: "importexport.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/route53": {
+ Endpoint: "route53.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "*/sts": {
+ Endpoint: "sts.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/waf": {
+ Endpoint: "waf.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "cn-north-1/*": {
+ Endpoint: "{service}.{region}.amazonaws.com.cn",
+ },
+ "eu-central-1/s3": {
+ Endpoint: "{service}.{region}.amazonaws.com",
+ },
+ "us-east-1/s3": {
+ Endpoint: "s3.amazonaws.com",
+ },
+ "us-east-1/sdb": {
+ Endpoint: "sdb.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "us-gov-west-1/iam": {
+ Endpoint: "iam.us-gov.amazonaws.com",
+ },
+ "us-gov-west-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "us-gov-west-1/sts": {
+ Endpoint: "sts.us-gov-west-1.amazonaws.com",
+ },
+ },
+}
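
Because endpointsMap is unexported, it is only reachable from inside the
endpoints package; the resolver above is its sole consumer. A hypothetical
in-package test (file and test names chosen for illustration) can still
sanity-check that the generated map mirrors endpoints.json:

    package endpoints

    import "testing"

    // TestUSEast1S3Override checks the hand-picked us-east-1/s3 entry,
    // which overrides the generic "*/s3" template.
    func TestUSEast1S3Override(t *testing.T) {
        ep, ok := endpointsMap.Endpoints["us-east-1/s3"]
        if !ok {
            t.Fatal("expected an entry for us-east-1/s3")
        }
        if ep.Endpoint != "s3.amazonaws.com" {
            t.Fatalf("got %q, want %q", ep.Endpoint, "s3.amazonaws.com")
        }
    }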
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 0000000..b03310a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..587b1fc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.5, 0.9, 0.99) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(float64(l) * q)
+ if i > 0 {
+ i--
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi--
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
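
The package's API surface is small: construct a Stream, Insert observations,
and Query quantiles. A minimal usage sketch (the targets, value distribution,
and counts are illustrative):

    package main

    import (
        "fmt"
        "math/rand"

        "github.com/beorn7/perks/quantile"
    )

    func main() {
        // Track the median and the 99th percentile, each with the stated
        // absolute error (see NewTargeted above).
        q := quantile.NewTargeted(map[float64]float64{
            0.50: 0.005,
            0.99: 0.001,
        })

        // Observations are compressed as they arrive, so memory stays
        // bounded even for unbounded streams.
        for i := 0; i < 100000; i++ {
            q.Insert(rand.Float64())
        }

        fmt.Println("count:", q.Count())
        fmt.Println("p50:", q.Query(0.50)) // roughly 0.5 for uniform data
        fmt.Println("p99:", q.Query(0.99)) // roughly 0.99
    }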
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE b/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go
new file mode 100644
index 0000000..5dd748e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go
@@ -0,0 +1,187 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dbus provides integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
+ num = `0123456789`
+ alphanum = alpha + num
+ signalBuffer = 100
+)
+
+// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
+func needsEscape(i int, b byte) bool {
+ // Escape everything that is not a-zA-Z0-9.
+ // Also escape 0-9 if it is the first character.
+ return strings.IndexByte(alphanum, b) == -1 ||
+ (i == 0 && strings.IndexByte(num, b) != -1)
+}
+
+// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
+// rules that systemd uses for serializing special characters.
+func PathBusEscape(path string) string {
+ // Special case the empty string
+ if len(path) == 0 {
+ return "_"
+ }
+ n := []byte{}
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if needsEscape(i, c) {
+ e := fmt.Sprintf("_%x", c)
+ n = append(n, []byte(e)...)
+ } else {
+ n = append(n, c)
+ }
+ }
+ return string(n)
+}
+
+// Conn is a connection to systemd's dbus endpoint.
+type Conn struct {
+ // sysconn/sysobj are only used to call dbus methods
+ sysconn *dbus.Conn
+ sysobj dbus.BusObject
+
+ // sigconn/sigobj are only used to receive dbus signals
+ sigconn *dbus.Conn
+ sigobj dbus.BusObject
+
+ jobListener struct {
+ jobs map[dbus.ObjectPath]chan<- string
+ sync.Mutex
+ }
+ subscriber struct {
+ updateCh chan<- *SubStateUpdate
+ errCh chan<- error
+ sync.Mutex
+ ignore map[dbus.ObjectPath]int64
+ cleanIgnore int64
+ }
+}
+
+// New establishes a connection to the system bus and authenticates.
+// Callers should call Close() when done with the connection.
+func New() (*Conn, error) {
+ return newConnection(func() (*dbus.Conn, error) {
+ return dbusAuthHelloConnection(dbus.SystemBusPrivate)
+ })
+}
+
+// NewUserConnection establishes a connection to the session bus and
+// authenticates. This can be used to connect to systemd user instances.
+// Callers should call Close() when done with the connection.
+func NewUserConnection() (*Conn, error) {
+ return newConnection(func() (*dbus.Conn, error) {
+ return dbusAuthHelloConnection(dbus.SessionBusPrivate)
+ })
+}
+
+// NewSystemdConnection establishes a private, direct connection to systemd.
+// This can be used for communicating with systemd without a dbus daemon.
+// Callers should call Close() when done with the connection.
+func NewSystemdConnection() (*Conn, error) {
+ return newConnection(func() (*dbus.Conn, error) {
+ // We skip Hello when talking directly to systemd.
+ return dbusAuthConnection(func() (*dbus.Conn, error) {
+ return dbus.Dial("unix:path=/run/systemd/private")
+ })
+ })
+}
+
+// Close closes an established connection.
+func (c *Conn) Close() {
+ c.sysconn.Close()
+ c.sigconn.Close()
+}
+
+func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) {
+ sysconn, err := createBus()
+ if err != nil {
+ return nil, err
+ }
+
+ sigconn, err := createBus()
+ if err != nil {
+ sysconn.Close()
+ return nil, err
+ }
+
+ c := &Conn{
+ sysconn: sysconn,
+ sysobj: systemdObject(sysconn),
+ sigconn: sigconn,
+ sigobj: systemdObject(sigconn),
+ }
+
+ c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
+ c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
+
+ // Set up the listeners on jobs so that we can get completions.
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
+
+ c.dispatch()
+ return c, nil
+}
+
+func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := createBus()
+ if err != nil {
+ return nil, err
+ }
+
+ // Only use EXTERNAL method, and hardcode the uid (not username)
+ // to avoid a username lookup (which requires a dynamically linked
+ // libc)
+ methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+ err = conn.Auth(methods)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+ conn, err := dbusAuthConnection(createBus)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func systemdObject(conn *dbus.Conn) dbus.BusObject {
+ return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
+}
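
A minimal end-to-end sketch of this connection API (the unit name is
hypothetical; StartUnit comes from methods.go, vendored below):

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/go-systemd/dbus"
    )

    func main() {
        // Connect to the system bus; NewUserConnection targets a systemd
        // user instance instead.
        conn, err := dbus.New()
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // PathBusEscape hex-escapes anything outside [a-zA-Z0-9].
        fmt.Println(dbus.PathBusEscape("my-app.service")) // my_2dapp_2eservice

        // Start a unit and wait for the job result on the channel.
        ch := make(chan string)
        if _, err := conn.StartUnit("example.service", "replace", ch); err != nil {
            log.Fatal(err)
        }
        fmt.Println("job result:", <-ch) // e.g. "done"
    }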
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go
new file mode 100644
index 0000000..ab614c7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go
@@ -0,0 +1,410 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "path"
+ "strconv"
+
+ "github.com/godbus/dbus"
+)
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+ var id uint32
+ var job dbus.ObjectPath
+ var unit string
+ var result string
+ dbus.Store(signal.Body, &id, &job, &unit, &result)
+ c.jobListener.Lock()
+ out, ok := c.jobListener.jobs[job]
+ if ok {
+ out <- result
+ delete(c.jobListener.jobs, job)
+ }
+ c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+ if ch != nil {
+ c.jobListener.Lock()
+ defer c.jobListener.Unlock()
+ }
+
+ var p dbus.ObjectPath
+ err := c.sysobj.Call(job, 0, args...).Store(&p)
+ if err != nil {
+ return 0, err
+ }
+
+ if ch != nil {
+ c.jobListener.jobs[p] = ch
+ }
+
+ // ignore error since 0 is fine if conversion fails
+ jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+ return jobID, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job has been depending on failed and the job hence
+// has been removed too. skipped indicates that a job was skipped because it
+// didn't apply to the unit's current state.
+//
+// If no error occurs, the ID of the underlying systemd job will be returned. There
+// does exist the possibility for no error to be returned, but for the returned job
+// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
+// should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// StopUnit is similar to StartUnit but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit. Reloading is done only if the unit is already running; otherwise the call fails.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service. If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it, and a restart
+// otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it, and a
+// "Try"-flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit() may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(); properties contains the
+// properties of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
+ var err error
+ var props map[string]dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make(map[string]interface{}, len(props))
+ for k, v := range props {
+ out[k] = v.Value()
+ }
+
+ return out, nil
+}
+
+// GetUnitProperties takes the unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: propertyName, Value: prop}, nil
+}
+
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
+// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties() may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ status := make([]UnitStatus, len(result))
+ statusInterface := make([]interface{}, len(status))
+ for i := range status {
+ statusInterface[i] = &status[i]
+ }
+
+ err = dbus.Store(resultInterface, statusInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
+
+type UnitStatus struct {
+ Name string // The primary unit name as string
+ Description string // The human readable description string
+ LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
+ ActiveState string // The active state (i.e. whether the unit is currently started or not)
+ SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+ Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+ Path dbus.ObjectPath // The unit object path
+ JobId uint32 // If there is a job queued for the unit, the numeric job id; 0 otherwise
+ JobType string // The job type as string
+ JobPath dbus.ObjectPath // The job object path
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// LinkUnitFiles() links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans. The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]LinkUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+// EnableUnitFiles() may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install] section). The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ var carriesInstallInfo bool
+
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]EnableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return false, nil, err
+ }
+
+ return carriesInstallInfo, changes, nil
+}
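+
+// Usage sketch (illustration only; the path is an assumption): persistently
+// enabling a unit file and reporting the symlinks created.
+//
+//    hasInstall, changes, err := conn.EnableUnitFiles([]string{"/etc/systemd/system/example.service"}, false, true)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    if !hasInstall {
+//        log.Print("unit carries no [Install] section")
+//    }
+//    for _, ch := range changes {
+//        fmt.Printf("%s: %s -> %s\n", ch.Type, ch.Filename, ch.Destination)
+//    }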
+
+type EnableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// DisableUnitFiles() may be used to disable one or more units in the system (by
+// removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]DisableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type DisableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Reload instructs systemd to scan for and reload unit files. This is
+// equivalent to a 'systemctl daemon-reload'.
+func (c *Conn) Reload() error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
+
+func unitPath(name string) dbus.ObjectPath {
+ return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go
new file mode 100644
index 0000000..7520011
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go
@@ -0,0 +1,218 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "github.com/godbus/dbus"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted, but we plan to cover more properties in future releases.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+ Name string
+ Value dbus.Variant
+}
+
+type PropertyCollection struct {
+ Name string
+ Properties []Property
+}
+
+type execStart struct {
+ Path string // the binary path to execute
+ Args []string // an array with all arguments to pass to the executed command, starting with argument 0
+ UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property. The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+ execStarts := []execStart{
+ {
+ Path: command[0],
+ Args: command,
+ UncleanIsFailure: uncleanIsFailure,
+ },
+ }
+
+ return Property{
+ Name: "ExecStart",
+ Value: dbus.MakeVariant(execStarts),
+ }
+}
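+
+// Usage sketch (illustration only; the command is an assumption): building a
+// property list for a transient unit. Such a list would typically be passed
+// to the manager's StartTransientUnit call defined elsewhere in this package
+// (its exact signature varies between go-systemd versions).
+//
+//    props := []Property{
+//        PropDescription("sleep for a minute"),
+//        PropExecStart([]string{"/bin/sleep", "60"}, false),
+//    }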
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+ return Property{
+ Name: "RemainAfterExit",
+ Value: dbus.MakeVariant(b),
+ }
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+ return Property{
+ Name: "Description",
+ Value: dbus.MakeVariant(desc),
+ }
+}
+
+func propDependency(name string, units []string) Property {
+ return Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+ return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+ return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+ return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+ return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+ return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+ return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+ return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+ return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+ return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+ return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+ return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+ return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+ return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+ return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+ return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+ return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+ return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
+func PropPropagatesReloadTo(units ...string) Property {
+ return propDependency("PropagatesReloadTo", units)
+}
+
+// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
+func PropRequiresMountsFor(units ...string) Property {
+ return propDependency("RequiresMountsFor", units)
+}
+
+// PropSlice sets the Slice unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
+func PropSlice(slice string) Property {
+ return Property{
+ Name: "Slice",
+ Value: dbus.MakeVariant(slice),
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go
new file mode 100644
index 0000000..f92e6fb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go
@@ -0,0 +1,47 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+type set struct {
+ data map[string]bool
+}
+
+func (s *set) Add(value string) {
+ s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+ delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+ _, exists = s.data[value]
+ return
+}
+
+func (s *set) Length() int {
+ return len(s.data)
+}
+
+func (s *set) Values() (values []string) {
+ for val := range s.data {
+ values = append(values, val)
+ }
+ return
+}
+
+func newSet() *set {
+ return &set{make(map[string]bool)}
+}
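+
+// Behavior sketch (illustration only): the set deduplicates on Add and its
+// Values() come back in no particular order.
+//
+//    s := newSet()
+//    s.Add("a.service")
+//    s.Add("a.service")
+//    s.Contains("a.service") // true
+//    s.Length()              // 1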
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go
new file mode 100644
index 0000000..9964514
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go
@@ -0,0 +1,250 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "time"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ cleanIgnoreInterval = int64(10 * time.Second)
+ ignoreInterval = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes
+// systemd will automatically stop sending signals so there is no need to
+// explicitly call Unsubscribe().
+func (c *Conn) Subscribe() error {
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
+
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Unsubscribe this connection from systemd dbus events.
+func (c *Conn) Unsubscribe() error {
+ err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Conn) dispatch() {
+ ch := make(chan *dbus.Signal, signalBuffer)
+
+ c.sigconn.Signal(ch)
+
+ go func() {
+ for {
+ signal, ok := <-ch
+ if !ok {
+ return
+ }
+
+ if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
+ c.jobComplete(signal)
+ }
+
+ if c.subscriber.updateCh == nil {
+ continue
+ }
+
+ var unitPath dbus.ObjectPath
+ switch signal.Name {
+ case "org.freedesktop.systemd1.Manager.JobRemoved":
+ unitName := signal.Body[2].(string)
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
+ case "org.freedesktop.systemd1.Manager.UnitNew":
+ unitPath = signal.Body[1].(dbus.ObjectPath)
+ case "org.freedesktop.DBus.Properties.PropertiesChanged":
+ if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
+ unitPath = signal.Path
+ }
+ }
+
+ if unitPath == dbus.ObjectPath("") {
+ continue
+ }
+
+ c.sendSubStateUpdate(unitPath)
+ }
+ }()
+}
+
+// SubscribeUnits returns two unbuffered channels which will receive all
+// changed units every interval. Deleted units are sent as nil.
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
+ return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
+}
+
+// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
+// size of the channels, the comparison function for detecting changes and a filter
+// function for cutting down on the noise that your channel receives.
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+ old := make(map[string]*UnitStatus)
+ statusChan := make(chan map[string]*UnitStatus, buffer)
+ errChan := make(chan error, buffer)
+
+ go func() {
+ for {
+ timerChan := time.After(interval)
+
+ units, err := c.ListUnits()
+ if err == nil {
+ cur := make(map[string]*UnitStatus)
+ for i := range units {
+ if filterUnit != nil && filterUnit(units[i].Name) {
+ continue
+ }
+ cur[units[i].Name] = &units[i]
+ }
+
+ // add all new or changed units
+ changed := make(map[string]*UnitStatus)
+ for n, u := range cur {
+ if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+ changed[n] = u
+ }
+ delete(old, n)
+ }
+
+ // add all deleted units
+ for oldN := range old {
+ changed[oldN] = nil
+ }
+
+ old = cur
+
+ if len(changed) != 0 {
+ statusChan <- changed
+ }
+ } else {
+ errChan <- err
+ }
+
+ <-timerChan
+ }
+ }()
+
+ return statusChan, errChan
+}
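+
+// Usage sketch (illustration only; conn is assumed to come from New() in this
+// package, with conn.Subscribe() already called as required): polling for
+// unit changes every five seconds.
+//
+//    statusCh, errCh := conn.SubscribeUnits(5 * time.Second)
+//    for {
+//        select {
+//        case changed := <-statusCh:
+//            for name, status := range changed {
+//                if status == nil {
+//                    fmt.Println(name, "was removed")
+//                } else {
+//                    fmt.Println(name, "is now", status.SubState)
+//                }
+//            }
+//        case err := <-errCh:
+//            log.Print(err)
+//        }
+//    }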
+
+type SubStateUpdate struct {
+ UnitName string
+ SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+ c.subscriber.updateCh = updateCh
+ c.subscriber.errCh = errCh
+}
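+
+// Usage sketch (illustration only): buffered channels make the non-blocking
+// writes described above less likely to drop updates.
+//
+//    updates := make(chan *SubStateUpdate, 256)
+//    errs := make(chan error, 16)
+//    conn.SetSubStateSubscriber(updates, errs)
+//    go func() {
+//        for u := range updates {
+//            fmt.Println(u.UnitName, "->", u.SubState)
+//        }
+//    }()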
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+
+ if c.shouldIgnore(path) {
+ return
+ }
+
+ info, err := c.GetUnitProperties(string(path))
+ if err != nil {
+ select {
+ case c.subscriber.errCh <- err:
+ default:
+ }
+ // info is nil on error; the type assertions below would panic without this return
+ return
+ }
+
+ name := info["Id"].(string)
+ substate := info["SubState"].(string)
+
+ update := &SubStateUpdate{name, substate}
+ select {
+ case c.subscriber.updateCh <- update:
+ default:
+ select {
+ case c.subscriber.errCh <- errors.New("update channel full!"):
+ default:
+ }
+ }
+
+ c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+ t, ok := c.subscriber.ignore[path]
+ return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+ c.cleanIgnore()
+
+ // unit is unloaded - it will trigger bad systemd dbus behavior
+ if info["LoadState"].(string) == "not-found" {
+ c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+ }
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+ now := time.Now().UnixNano()
+ if c.subscriber.cleanIgnore < now {
+ c.subscriber.cleanIgnore = now + cleanIgnoreInterval
+
+ for p, t := range c.subscriber.ignore {
+ if t < now {
+ delete(c.subscriber.ignore, p)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 0000000..5b408d5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "time"
+)
+
+// SubscriptionSet is a subscription set which works like conn.Subscribe but
+// can filter to only return events for a set of units.
+type SubscriptionSet struct {
+ *set
+ conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+ return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+ // TODO: Make fully evented by using the PropertiesChanged signal values available since systemd 209
+ return s.conn.SubscribeUnitsCustom(time.Second, 0,
+ mismatchUnitStatus,
+ func(unit string) bool { return s.filter(unit) },
+ )
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+ return &SubscriptionSet{newSet(), conn}
+}
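+
+// Usage sketch (illustration only; the unit name is an assumption): watching
+// a single unit instead of every loaded one.
+//
+//    set := conn.NewSubscriptionSet()
+//    set.Add("example.service")
+//    statusCh, errCh := set.Subscribe()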
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects are not
+// equivalent; only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+ return u1.Name != u2.Name ||
+ u1.Description != u2.Description ||
+ u1.LoadState != u2.LoadState ||
+ u1.ActiveState != u2.ActiveState ||
+ u1.SubState != u2.SubState
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-systemd/util/util.go b/Godeps/_workspace/src/github.com/coreos/go-systemd/util/util.go
new file mode 100644
index 0000000..33832a1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-systemd/util/util.go
@@ -0,0 +1,33 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package util contains utility functions related to systemd that applications
+// can use to check things like whether systemd is running.
+package util
+
+import (
+ "os"
+)
+
+// IsRunningSystemd checks whether the host was booted with systemd as its init
+// system. It functions similarly to systemd's `sd_booted(3)`: internally, it
+// checks whether /run/systemd/system/ exists and is a directory.
+// http://www.freedesktop.org/software/systemd/man/sd_booted.html
+func IsRunningSystemd() bool {
+ fi, err := os.Lstat("/run/systemd/system")
+ if err != nil {
+ return false
+ }
+ return fi.IsDir()
+}
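+
+// Usage sketch (illustration only): gating systemd-specific code paths.
+//
+//    if !util.IsRunningSystemd() {
+//        log.Fatal("this host was not booted with systemd")
+//    }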
diff --git a/Godeps/_workspace/src/github.com/docker/docker/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/LICENSE
new file mode 100644
index 0000000..c7a3f0c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/NOTICE b/Godeps/_workspace/src/github.com/docker/docker/NOTICE
new file mode 100644
index 0000000..6e6f469
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE
new file mode 100644
index 0000000..4362b49
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/contrib/docker-engine-selinux/LICENSE
@@ -0,0 +1,502 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE
new file mode 100644
index 0000000..e67cdab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 Honza Pokorny
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/longpath/longpath.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 0000000..9b15bff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// Package longpath introduces some constants and helper functions for handling long paths
+// in Windows, which are expected to be prepended with `\\?\` and followed by either
+// a drive letter, a UNC server\share, or a volume identifier.
+
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
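
For orientation (illustrative only, not part of the vendored diff), a minimal sketch of how `AddPrefix` treats the two path shapes it distinguishes, assuming the package is imported from this vendored tree:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	// A drive-letter path simply gains the `\\?\` prefix.
	fmt.Println(longpath.AddPrefix(`C:\some\very\long\path`))
	// -> \\?\C:\some\very\long\path

	// A UNC path swaps its leading backslash for the `UNC` marker.
	fmt.Println(longpath.AddPrefix(`\\server\share\file.txt`))
	// -> \\?\UNC\server\share\file.txt

	// An already-prefixed path is returned unchanged.
	fmt.Println(longpath.AddPrefix(`\\?\C:\already\prefixed`))
}
```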
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE
new file mode 100644
index 0000000..ac74d8f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go
new file mode 100644
index 0000000..17dbd7a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go
@@ -0,0 +1,69 @@
+package mount
+
+import (
+ "strings"
+)
+
+// Parse fstab type mount options into mount() flags
+// and device specific data
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ flags := map[string]struct {
+ clear bool
+ flag int
+ }{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "unbindable": {false, UNBINDABLE},
+ "runbindable": {false, RUNBINDABLE},
+ "private": {false, PRIVATE},
+ "rprivate": {false, RPRIVATE},
+ "shared": {false, SHARED},
+ "rshared": {false, RSHARED},
+ "slave": {false, SLAVE},
+ "rslave": {false, RSLAVE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+ }
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
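
Since parseOptions is unexported, here is a hedged sketch of its behavior written as an in-package test (illustrative, not part of the diff; the expected flag values assume Linux, where NOATIME and RDONLY are non-zero):

```go
package mount

import "testing"

func TestParseOptionsSketch(t *testing.T) {
	// "noatime" and "ro" are in the flags table; "size=65536k" is not,
	// so it falls through to the comma-joined data string.
	flag, data := parseOptions("noatime,ro,size=65536k")
	if flag != NOATIME|RDONLY {
		t.Fatalf("unexpected flags: %#x", flag)
	}
	if data != "size=65536k" {
		t.Fatalf("unexpected data: %q", data)
	}
}
```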
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go
new file mode 100644
index 0000000..f166cb2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ // RDONLY will mount the filesystem as read-only.
+ RDONLY = C.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = C.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = C.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go
new file mode 100644
index 0000000..2f9f5c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go
@@ -0,0 +1,85 @@
+package mount
+
+import (
+ "syscall"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = syscall.MS_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = syscall.MS_NOSUID
+
+ // NODEV will not interpret character or block special devices on the file
+ // system.
+ NODEV = syscall.MS_NODEV
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = syscall.MS_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = syscall.MS_SYNCHRONOUS
+
+ // DIRSYNC will force all directory updates within the file system to be done
+ // synchronously. This affects the following system calls: creat, link,
+ // unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = syscall.MS_DIRSYNC
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = syscall.MS_REMOUNT
+
+ // MANDLOCK will force mandatory locks on a filesystem.
+ MANDLOCK = syscall.MS_MANDLOCK
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = syscall.MS_NOATIME
+
+ // NODIRATIME will not update the directory access time.
+ NODIRATIME = syscall.MS_NODIRATIME
+
+ // BIND remounts a subtree somewhere else.
+ BIND = syscall.MS_BIND
+
+ // RBIND remounts a subtree and all possible submounts somewhere else.
+ RBIND = syscall.MS_BIND | syscall.MS_REC
+
+ // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+ UNBINDABLE = syscall.MS_UNBINDABLE
+
+ // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
+ RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
+
+ // PRIVATE creates a mount which carries no propagation abilities.
+ PRIVATE = syscall.MS_PRIVATE
+
+ // RPRIVATE marks the entire mount tree as PRIVATE.
+ RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
+
+ // SLAVE creates a mount which receives propagation from its master, but not
+ // vice versa.
+ SLAVE = syscall.MS_SLAVE
+
+ // RSLAVE marks the entire mount tree as SLAVE.
+ RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
+
+ // SHARED creates a mount which provides the ability to create mirrors of
+ // that mount such that mounts and unmounts within any of the mirrors
+ // propagate to the other mirrors.
+ SHARED = syscall.MS_SHARED
+
+ // RSHARED marks the entire mount tree as SHARED.
+ RSHARED = syscall.MS_SHARED | syscall.MS_REC
+
+ // RELATIME updates inode access times relative to modify or change time.
+ RELATIME = syscall.MS_RELATIME
+
+ // STRICTATIME allows explicitly requesting full atime updates. This makes
+ // it possible for the kernel to default to relatime or noatime but still
+ // allow userspace to override it.
+ STRICTATIME = syscall.MS_STRICTATIME
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go
new file mode 100644
index 0000000..a90d3d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go
@@ -0,0 +1,30 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go
new file mode 100644
index 0000000..ed7216e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go
@@ -0,0 +1,74 @@
+package mount
+
+import (
+ "time"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+ return parseMountTable()
+}
+
+// Mounted looks at /proc/self/mountinfo to determine if the specified
+// mountpoint has been mounted.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ // Search the table for the mountpoint
+ for _, e := range entries {
+ if e.Mountpoint == mountpoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Mount will mount the filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
+ }
+ return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+ flag, data := parseOptions(options)
+ if err := mount(device, target, mType, uintptr(flag), data); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Unmount will unmount the target filesystem, so long as it is mounted.
+func Unmount(target string) error {
+ if mounted, err := Mounted(target); err != nil || !mounted {
+ return err
+ }
+ return ForceUnmount(target)
+}
+
+// ForceUnmount will force an unmount of the target filesystem, regardless of
+// whether it is mounted.
+func ForceUnmount(target string) (err error) {
+ // Simple retry logic for unmount
+ for i := 0; i < 10; i++ {
+ if err = unmount(target, 0); err == nil {
+ return nil
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ return
+}
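
Taken together, a plausible use of this API (illustrative only; requires root and existing /tmp/src and /tmp/dst directories on Linux):

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Bind-mount /tmp/src onto /tmp/dst read-only. "bind" and "ro" are
	// both flag options, so parseOptions leaves the data string empty;
	// mounter_linux.go then remounts to make MS_RDONLY effective.
	if err := mount.Mount("/tmp/src", "/tmp/dst", "none", "bind,ro"); err != nil {
		log.Fatal(err)
	}
	defer mount.Unmount("/tmp/dst")

	mounted, err := mount.Mounted("/tmp/dst")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("/tmp/dst mounted: %v", mounted)
}
```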
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go
new file mode 100644
index 0000000..bb870e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return syscall.Unmount(target, flag)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go
new file mode 100644
index 0000000..dd4280c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go
@@ -0,0 +1,21 @@
+package mount
+
+import (
+ "syscall"
+)
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ if err := syscall.Mount(device, target, mType, flag, data); err != nil {
+ return err
+ }
+
+ // A read-only bind mount does not apply MS_RDONLY on the initial mount, so remount to enforce it.
+ if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
+ return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return syscall.Unmount(target, flag)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go
new file mode 100644
index 0000000..eb93365
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+ panic("Not implemented")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go
new file mode 100644
index 0000000..e3fc353
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go
@@ -0,0 +1,40 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
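
A short sketch of reading this table (illustrative, not part of the diff): list every tmpfs mount visible to the current process.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	infos, err := mount.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, info := range infos {
		if info.Fstype == "tmpfs" {
			fmt.Printf("%s on %s (%s)\n", info.Source, info.Mountpoint, info.Opts)
		}
	}
}
```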
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 0000000..4f32edc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// parseMountTable returns the mount table as reported by getmntinfo(3);
+// /proc/self/mountinfo is not available on FreeBSD.
+func parseMountTable() ([]*Info, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*Info
+ for _, entry := range entries {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go
new file mode 100644
index 0000000..be69fee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*Info{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) < 3 {
+ return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ if optionalFields != "-" {
+ p.Optional = optionalFields
+ }
+
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+ out = append(out, p)
+ }
+ return out, nil
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
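
As a sanity check on the format comment above, a hedged in-package test sketch (Linux-only, illustrative) that feeds the documented sample line through parseInfoFile:

```go
package mount

import (
	"strings"
	"testing"
)

func TestParseInfoFileSketch(t *testing.T) {
	const line = "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n"
	infos, err := parseInfoFile(strings.NewReader(line))
	if err != nil {
		t.Fatal(err)
	}
	if len(infos) != 1 {
		t.Fatalf("expected one entry, got %d", len(infos))
	}
	i := infos[0]
	// Fields (1)-(7) come from Sscanf; (9)-(11) from the " - " split.
	if i.ID != 36 || i.Parent != 35 || i.Major != 98 || i.Minor != 0 {
		t.Fatalf("unexpected IDs: %+v", i)
	}
	if i.Root != "/mnt1" || i.Mountpoint != "/mnt2" || i.Optional != "master:1" {
		t.Fatalf("unexpected mount fields: %+v", i)
	}
	if i.Fstype != "ext3" || i.Source != "/dev/root" || i.VfsOpts != "rw,errors=continue" {
		t.Fatalf("unexpected post-separator fields: %+v", i)
	}
}
```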
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 0000000..8245f01
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func parseMountTable() ([]*Info, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 0000000..47303bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,70 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ mounted, err := Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ if !mounted {
+ if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+ return err
+ }
+ }
+ mounted, err = Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ return ForceMount("", mountPoint, "none", options)
+}
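
A typical (illustrative) use of these helpers: a container runtime marks the whole tree recursively private inside a fresh mount namespace so its mounts do not propagate back to the host. Requires root.

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Equivalent in spirit to `mount --make-rprivate /`.
	if err := mount.MakeRPrivate("/"); err != nil {
		log.Fatal(err)
	}
	log.Println("mount propagation on / set to rprivate")
}
```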
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
new file mode 100644
index 0000000..9e4bd4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD
new file mode 100644
index 0000000..ac74d8f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md
new file mode 100644
index 0000000..8dba54f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md
@@ -0,0 +1,6 @@
+Package symlink implements EvalSymlinksInScope, which is an extension of filepath.EvalSymlinks,
+as well as a Windows long-path aware version of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go
new file mode 100644
index 0000000..dcf707f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
+// absolute path. This function handles paths in a platform-agnostic manner.
+func FollowSymlinkInScope(path, root string) (string, error) {
+ path, err := filepath.Abs(filepath.FromSlash(path))
+ if err != nil {
+ return "", err
+ }
+ root, err = filepath.Abs(filepath.FromSlash(root))
+ if err != nil {
+ return "", err
+ }
+ return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+ root = filepath.Clean(root)
+ if path == root {
+ return path, nil
+ }
+ if !strings.HasPrefix(path, root) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ const maxIter = 255
+ originalPath := path
+ // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+ path = path[len(root):]
+ if root == string(filepath.Separator) {
+ path = string(filepath.Separator) + path
+ }
+ if !strings.HasPrefix(path, string(filepath.Separator)) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ path = filepath.Clean(path)
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ // b here will always be considered to be the "current absolute path inside
+ // root" when we append paths to it, we also append a slash and use
+ // filepath.Clean after the loop to trim the trailing slash
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+ }
+
+ // find next path component, p
+ i := strings.IndexRune(path, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ continue
+ }
+
+ // this takes a b.String() like "b/../" and a p like "c" and turns it
+ // into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+ // root gets prepended and we Clean again (to remove any trailing slash
+ // if the first Clean gave us just "/")
+ cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+ if cleanP == string(filepath.Separator) {
+ // never Lstat "/" itself
+ b.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ fi, err := os.Lstat(fullP)
+ if os.IsNotExist(err) {
+ // if p does not exist, accept it
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p + string(filepath.Separator))
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(fullP)
+ if err != nil {
+ return "", err
+ }
+ if system.IsAbs(dest) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+
+ // see note above on "fullP := ..." for why this is double-cleaned and
+ // what's happening here
+ return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
+}
+
+// EvalSymlinks returns the path name after the evaluation of any symbolic
+// links.
+// If path is relative the result will be relative to the current directory,
+// unless one of the components is an absolute symbolic link.
+// This version has been updated to support long paths prepended with `\\?\`.
+func EvalSymlinks(path string) (string, error) {
+ return evalSymlinks(path)
+}
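
A brief sketch of the scoping guarantee (illustrative only): even when a symlink inside the scope points at an absolute host path, the result stays under the scope root.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// If /rootfs/etc/passwd is a symlink to the absolute path
	// /etc/passwd, the absolute target is re-rooted under /rootfs,
	// so this prints /rootfs/etc/passwd rather than /etc/passwd.
	p, err := symlink.FollowSymlinkInScope("/rootfs/etc/passwd", "/rootfs")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p)
}
```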
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_unix.go
new file mode 100644
index 0000000..818004f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package symlink
+
+import (
+ "path/filepath"
+)
+
+func evalSymlinks(path string) (string, error) {
+ return filepath.EvalSymlinks(path)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_windows.go
new file mode 100644
index 0000000..29bd456
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs_windows.go
@@ -0,0 +1,156 @@
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+func toShort(path string) (string, error) {
+ p, err := syscall.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetShortPathName says we can reuse buffer
+ n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ n, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ }
+ return syscall.UTF16ToString(b), nil
+}
+
+func toLong(path string) (string, error) {
+ p, err := syscall.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetLongPathName says we can reuse buffer
+ n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ }
+ b = b[:n]
+ return syscall.UTF16ToString(b), nil
+}
+
+func evalSymlinks(path string) (string, error) {
+ path, err := walkSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+
+ p, err := toShort(path)
+ if err != nil {
+ return "", err
+ }
+ p, err = toLong(p)
+ if err != nil {
+ return "", err
+ }
+ // syscall.GetLongPathName does not change the case of the drive letter,
+ // but the result of EvalSymlinks must be unique, so we have
+ // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`).
+ // Make drive letter upper case.
+ if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' {
+ p = string(p[0]+'A'-'a') + p[1:]
+ } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' {
+ p = p[:3] + string(p[4]+'A'-'a') + p[5:]
+ }
+ return filepath.Clean(p), nil
+}
+
+const utf8RuneSelf = 0x80
+
+func walkSymlinks(path string) (string, error) {
+ const maxIter = 255
+ originalPath := path
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("EvalSymlinks: too many links in " + originalPath)
+ }
+
+ // A path beginning with `\\?\` represents the root, so automatically
+ // skip that part and begin processing the next segment.
+ if strings.HasPrefix(path, longpath.Prefix) {
+ b.WriteString(longpath.Prefix)
+ path = path[4:]
+ continue
+ }
+
+ // find next path component, p
+ var i = -1
+ for j, c := range path {
+ if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) {
+ i = j
+ break
+ }
+ }
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ if b.Len() == 0 {
+ // must be absolute path
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // If this is the first segment after the long path prefix, accept the
+ // current segment as a volume root or UNC share and move on to the next.
+ if b.String() == longpath.Prefix {
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+
+ fi, err := os.Lstat(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p)
+ if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') {
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+ return filepath.Clean(b.String()), nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 0000000..31ed9ff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,31 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+	// The maximum representable Unix time here is 1<<33 - 1 seconds after
+	// the epoch (roughly the year 2242)
+	unixMaxTime := unixMinTime.Add((1<<33 - 1) * time.Second)
+
+	// If either time is prior to the Unix epoch or after the end of the
+	// representable range, os.Chtimes has undefined behavior; default to
+	// the Unix epoch in that case, just to be safe.
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 0000000..2883189
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNotSupportedPlatform means the platform is not supported.
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 0000000..04e2de7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,83 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ procCreateEvent = modkernel32.NewProc("CreateEventW")
+ procOpenEvent = modkernel32.NewProc("OpenEventW")
+ procSetEvent = modkernel32.NewProc("SetEvent")
+ procResetEvent = modkernel32.NewProc("ResetEvent")
+ procPulseEvent = modkernel32.NewProc("PulseEvent")
+)
+
+// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
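+//
+// A hypothetical usage sketch (the event name is illustrative only):
+//
+//	h, err := CreateEvent(nil, true, false, "Global\\my-event")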
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32
+ if manualReset {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if initialState {
+ _p2 = 1
+ }
+ r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+	handle = syscall.Handle(r0)
+	// CreateEventW returns NULL (not INVALID_HANDLE_VALUE) on failure.
+	if handle == 0 {
+		err = e1
+	}
+ return
+}
+
+// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32
+ if inheritHandle {
+ _p1 = 1
+ }
+ r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+	handle = syscall.Handle(r0)
+	// OpenEventW returns NULL on failure.
+	if handle == 0 {
+		err = e1
+	}
+ return
+}
+
+// SetEvent implements win32 SetEvent func in golang.
+func SetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procSetEvent)
+}
+
+// ResetEvent implements win32 ResetEvent func in golang.
+func ResetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procResetEvent)
+}
+
+// PulseEvent implements win32 PulseEvent func in golang.
+func PulseEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
+	// SetEvent, ResetEvent and PulseEvent each return a BOOL: nonzero on
+	// success, zero on failure (with the error in GetLastError).
+	r0, _, e1 := proc.Call(uintptr(handle))
+	if r0 == 0 {
+		err = e1
+	}
+	return
+}
+
+var temp unsafe.Pointer
+
+// use keeps a variable reachable so the garbage collector does not free it while it is still needed
+func use(p unsafe.Pointer) {
+ temp = p
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 0000000..c14feb8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permission specified by attribute perm for all dir created.
+func MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 0000000..16823d5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,82 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+)
+
+// MkdirAll is a volume-path-aware implementation of os.MkdirAll for Windows.
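+// A path naming a bare volume root, such as the hypothetical
+// `\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}`, already exists by
+// definition, so MkdirAll returns nil for it immediately.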
+func MkdirAll(path string, perm os.FileMode) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = MkdirAll(path[0:j-1], perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = os.Mkdir(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being
+// processed by the daemon). This SHOULD be treated as absolute from a docker
+// processing perspective.
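+//
+// For example, IsAbs(`\windows\system32`) reports true here, even though
+// filepath.IsAbs reports false for the same path.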
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 0000000..bd23c4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Lstat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 0000000..49e87eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+)
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS-specific module.
+func Lstat(path string) (*StatT, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &StatT{
+ name: fi.Name(),
+ size: fi.Size(),
+ mode: fi.Mode(),
+ modTime: fi.ModTime(),
+ isDir: fi.IsDir()}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 0000000..3b6e947
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 0000000..a07bb17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,66 @@
+package system
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/pkg/units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+//
+// Throws error if there are problems reading from the file
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+	}
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 0000000..82ddd30
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux and windows.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 0000000..d466425
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,44 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
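+		// dwLength must be initialized to the size of the structure,
+		// which is 64 bytes, before the call.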
+ dwLength: 64,
+ }
+	r1, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+	if r1 == 0 {
+		// GlobalMemoryStatusEx returns zero on failure; surface the error
+		// instead of silently returning empty statistics.
+		return nil, err
+	}
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 0000000..7395818
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return syscall.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
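+//
+// For example, Mkdev(8, 1) yields 0x801 (2049), the device number for
+// major 8, minor 1 (such as /dev/sda1 on Linux).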
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 0000000..2e863c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 0000000..087034c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,53 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns file's device ID (if it's special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// GetLastModification returns file's last modification time.
+func (s StatT) GetLastModification() syscall.Timespec {
+ return s.Mtim()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 0000000..d0fb6f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 0000000..8b1eded
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on linux, and loads a system.StatT from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 0000000..381ea82
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 0000000..39490c6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like name, permission, size, etc about a file.
+type StatT struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ isDir bool
+}
+
+// Name returns file's name.
+func (s StatT) Name() string {
+ return s.name
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return s.mode
+}
+
+// ModTime returns file's last modification time.
+func (s StatT) ModTime() time.Time {
+ return s.modTime
+}
+
+// IsDir returns whether file is actually a directory.
+func (s StatT) IsDir() bool {
+ return s.isDir
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 0000000..c670fcd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Umask sets the current process's file mode creation mask to newmask
+// and returns the old mask.
+func Umask(newmask int) (oldmask int, err error) {
+ return syscall.Umask(newmask), nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 0000000..13f1de1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
new file mode 100644
index 0000000..0a16197
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -0,0 +1,8 @@
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported by darwin platform.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000..e2eac3b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 0000000..007bfa8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ // These are not currently available in syscall
+ atFdCwd := -100
+ atSymLinkNoFollow := 0x100
+
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000..50c3a04
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000..d2e2c05
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
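+//
+// A hypothetical call, reading a file's SELinux label:
+//
+//	label, err := Lgetxattr("/some/path", "security.selinux")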
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ dest := make([]byte, 128)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno == syscall.ENODATA {
+ return nil, nil
+ }
+	if errno == syscall.ERANGE {
+		// On ERANGE the call returns -1 rather than the required size, so
+		// query the size with a zero-length buffer before retrying.
+		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), 0, 0, 0, 0)
+		if errno == 0 {
+			dest = make([]byte, sz)
+			destBytes := unsafe.Pointer(&dest[0])
+			sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+		}
+	}
+ if errno != 0 {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000..0114f22
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
new file mode 100644
index 0000000..c219a8a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper function to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours", etc.).
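+//
+// For example, HumanDuration(90*time.Minute) returns "About an hour" and
+// HumanDuration(72*time.Hour) returns "3 days".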
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
new file mode 100644
index 0000000..3b59daf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using custom format.
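+//
+// For example, CustomSize("%.4g %s", 2048, 1024.0, binaryAbbrs) returns
+// "2 KiB".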
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
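+//
+// For example, RAMInBytes("32mb") returns 33554432 (32 MiB) and
+// RAMInBytes("2g") returns 2147483648.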
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// parseSize parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 0000000..e3d9a64
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 0000000..5a8e332
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
new file mode 100644
index 0000000..5a8e332
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
new file mode 100644
index 0000000..5a8e332
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING
new file mode 100644
index 0000000..5a8e332
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE
new file mode 100644
index 0000000..bc756ae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Graylog2/go-gelf/LICENSE
@@ -0,0 +1,21 @@
+Copyright 2012 SocialCode
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/agl/ed25519/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE
new file mode 100644
index 0000000..106569e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/armon/go-metrics/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE
new file mode 100644
index 0000000..004e77f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/boltdb/bolt/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/etcd/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE
new file mode 100644
index 0000000..b5768f8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/deckarep/golang-set/LICENSE
@@ -0,0 +1,22 @@
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/distribution/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code
new file mode 100644
index 0000000..9e4bd4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.code
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs
new file mode 100644
index 0000000..e26cd4f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libkv/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material,
+
+ including for purposes of Section 3(b); and
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libnetwork/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE
new file mode 100644
index 0000000..2744858
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/libtrust/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE
new file mode 100644
index 0000000..6daf85e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE
new file mode 100644
index 0000000..c8476ac
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/docker/notary/notarymysql/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sameer Naik
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/endophage/gotuf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/endophage/gotuf/LICENSE
new file mode 100644
index 0000000..d92ae9e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/endophage/gotuf/LICENSE
@@ -0,0 +1,30 @@
+Copyright (c) 2015, Docker Inc.
+Copyright (c) 2014-2015 Prime Directive, Inc.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Prime Directive, Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE
new file mode 100644
index 0000000..1aa91de
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/fluent/fluent-logger-golang/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2013 Tatsuo Kaniwa
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE
new file mode 100644
index 0000000..545cf2d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/go-check/check/LICENSE
@@ -0,0 +1,25 @@
+Gocheck - A rich testing framework for Go
+
+Copyright (c) 2010-2013 Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE
new file mode 100644
index 0000000..06b252b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/godbus/dbus/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013, Georg Reinke (<guelfey at gmail dot com>)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md
new file mode 100644
index 0000000..ac2c064
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/consul/website/LICENSE.md
@@ -0,0 +1,10 @@
+# Proprietary License
+
+This license is temporary while a more official one is drafted. However,
+this should make it clear:
+
+* The text contents of this website are MPL 2.0 licensed.
+
+* The design contents of this website are proprietary and may not be reproduced
+ or reused in any way other than to run the Consul website locally. The license
+ for the design is owned solely by HashiCorp, Inc.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE
new file mode 100644
index 0000000..ccae99f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/go-msgpack/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2012, 2013 Ugorji Nwoke.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the author nor the names of its contributors may be used
+ to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/memberlist/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md
new file mode 100644
index 0000000..36c29d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/hashicorp/serf/website/LICENSE.md
@@ -0,0 +1,10 @@
+# Proprietary License
+
+This license is temporary while a more official one is drafted. However,
+this should make it clear:
+
+* The text contents of this website are MPL 2.0 licensed.
+
+* The design contents of this website are proprietary and may not be reproduced
+ or reused in any way other than to run the Serf website locally. The license
+ for the design is owned solely by HashiCorp, Inc.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License
new file mode 100644
index 0000000..6b7558b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/kr/pty/License
@@ -0,0 +1,23 @@
+Copyright (c) 2011 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall
+be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE
new file mode 100644
index 0000000..ca458bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mattn/go-sqlite3/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE
new file mode 100644
index 0000000..b8b569d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/microsoft/hcsshim/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE
new file mode 100644
index 0000000..f4c265c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/mistifyio/go-zfs/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (c) 2014, OmniTI Computer Consulting, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE
new file mode 100644
index 0000000..2744858
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/opencontainers/runc/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE
new file mode 100644
index 0000000..bc00498
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/samuel/go-zookeeper/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013, Samuel Stauffer
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+* Neither the name of the author nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE
new file mode 100644
index 0000000..80dd96d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/syndtr/gocapability/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2013 Suryandaru Triandana
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE
new file mode 100644
index 0000000..e50d398
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tchap/go-patricia/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 The AUTHORS
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE
new file mode 100644
index 0000000..14d6042
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/tinylib/msgp/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2014 Philip Hofer
+Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE
new file mode 100644
index 0000000..95a0f05
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/ugorji/go/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2015 Ugorji Nwoke.
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE
new file mode 100644
index 0000000..968b453
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vaughan0/go-ini/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2013 Vaughan Newton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
+persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE
new file mode 100644
index 0000000..8ba5491
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vbatts/tar-split/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE
new file mode 100644
index 0000000..9f64db8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netlink/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Vishvananda Ishaya.
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE
new file mode 100644
index 0000000..9f64db8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/github.com/vishvananda/netns/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Vishvananda Ishaya.
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE
new file mode 100644
index 0000000..f21e540
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/gopkg.in/fsnotify.v1/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 0000000..9ea86d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,79 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
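+
+For example, a minimal sketch of the full flow (the name, email, and commit
+message below are illustrative placeholders, not values from this project):
+
+```
+# one-time identity setup; this is what the sign-off trailer is built from
+git config user.name "Joe Smith"
+git config user.email "joe.smith@email.com"
+
+# -s appends "Signed-off-by: Joe Smith <joe.smith@email.com>" to the message
+git commit -s -m "Describe your change"
+```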
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code
new file mode 100644
index 0000000..b55b37b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs
new file mode 100644
index 0000000..e26cd4f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+     respect those requests where reasonable. More considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+  b. if You include all or a substantial portion of the database
+     contents in a database in which You have Sui Generis Database
+     Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material,
+     including for purposes of Section 3(b); and
+
+  c. You must comply with the conditions in Section 3(a) if You Share
+     all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS
new file mode 100644
index 0000000..477be8b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS
@@ -0,0 +1,27 @@
+# go-units maintainers file
+#
+# This file describes who runs the docker/go-units project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "calavera",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+ [people.calavera]
+ Name = "David Calavera"
+ Email = "david.calavera@gmail.com"
+ GitHub = "calavera"
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/README.md b/Godeps/_workspace/src/github.com/docker/go-units/README.md
new file mode 100644
index 0000000..3ce4d79
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/README.md
@@ -0,0 +1,18 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human-friendly measurements into machine-friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
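+
+A minimal sketch using functions defined in this package (the printed values
+follow the formatting rules in `size.go` and are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/docker/go-units"
+)
+
+func main() {
+	fmt.Println(units.HumanSize(2746000))          // "2.746 MB" (decimal units)
+	fmt.Println(units.BytesSize(17 * 1024 * 1024)) // "17 MiB" (binary units)
+
+	// Parse a human-readable size back into a byte count.
+	if n, err := units.FromHumanSize("44kB"); err == nil {
+		fmt.Println(n) // 44000
+	}
+}
+```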
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code
+is released under the Apache 2.0 license. The README.md file and files in the
+"docs" folder are licensed under the Creative Commons Attribution-ShareAlike
+4.0 International License under the terms and conditions set forth in the
+file "LICENSE.docs". You may obtain a duplicate copy of the same license,
+titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/circle.yml b/Godeps/_workspace/src/github.com/docker/go-units/circle.yml
new file mode 100644
index 0000000..9043b35
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/circle.yml
@@ -0,0 +1,11 @@
+dependencies:
+ post:
+ # install golint
+ - go get github.com/golang/lint/golint
+
+test:
+ pre:
+ # run analysis before tests
+ - go vet ./...
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/duration.go b/Godeps/_workspace/src/github.com/docker/go-units/duration.go
new file mode 100644
index 0000000..c219a8a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper function to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (e.g. "Less than a second", "About a minute", "4 hours").
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
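+
+// A quick sketch of the buckets above (outputs follow the thresholds in
+// HumanDuration):
+//
+//	HumanDuration(30 * time.Second) // "30 seconds"
+//	HumanDuration(47 * time.Hour)   // "47 hours"
+//	HumanDuration(49 * time.Hour)   // "2 days"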
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/size.go b/Godeps/_workspace/src/github.com/docker/go-units/size.go
new file mode 100644
index 0000000..3b59daf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using a custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ // Stop at the last unit so an oversized value cannot index past _map.
+ for size >= base && i < len(_map)-1 {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at four significant digits (e.g. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimalAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (e.g. "44 KiB", "17 MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using the SI standard (e.g. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// parseSize parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
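+
+// A sketch of the parse/format pairing above (values follow the unit maps
+// defined in this file):
+//
+//	RAMInBytes("64m")     // 67108864, nil (binary map: 64 * 1024 * 1024)
+//	FromHumanSize("64MB") // 64000000, nil (decimal map)
+//	HumanSize(64000000)   // "64 MB"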
diff --git a/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go b/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go
new file mode 100644
index 0000000..5ac7fd8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human-friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // Magic numbers for making the rlimit syscalls. Some of these are
+ // defined in the syscall package, but not all, and the Windows client
+ // doesn't get access to the syscall package at all, so they are
+ // defined here.
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the Rlimit corresponding to the Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
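+
+// A sketch of the "soft[:hard]" syntax accepted by ParseUlimit:
+//
+//	u, _ := ParseUlimit("nofile=1024:2048") // Soft: 1024, Hard: 2048
+//	u, _ = ParseUlimit("nproc=512")         // hard defaults to soft: 512
+//	u.String()                              // "nproc=512:512"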
diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/LICENSE b/Godeps/_workspace/src/github.com/eapache/go-resiliency/LICENSE
new file mode 100644
index 0000000..698a3f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/go-resiliency/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md
new file mode 100644
index 0000000..2d1b3d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md
@@ -0,0 +1,34 @@
+circuit-breaker
+===============
+
+[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+The circuit-breaker resiliency pattern for golang.
+
+Creating a breaker takes three parameters:
+- error threshold (for opening the breaker)
+- success threshold (for closing the breaker)
+- timeout (how long to keep the breaker open)
+
+```go
+b := breaker.New(3, 1, 5*time.Second)
+
+for {
+ result := b.Run(func() error {
+ // communicate with some external service and
+ // return an error if the communication failed
+ return nil
+ })
+
+ switch result {
+ case nil:
+ // success!
+ case breaker.ErrBreakerOpen:
+ // our function wasn't run because the breaker was open
+ default:
+ // some other error
+ }
+}
+```
diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go
new file mode 100644
index 0000000..f88ca72
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -0,0 +1,161 @@
+// Package breaker implements the circuit-breaker resiliency pattern for Go.
+package breaker
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ErrBreakerOpen is the error returned from Run() when the function is not executed
+// because the breaker is currently open.
+var ErrBreakerOpen = errors.New("circuit breaker is open")
+
+const (
+ closed uint32 = iota
+ open
+ halfOpen
+)
+
+// Breaker implements the circuit-breaker resiliency pattern
+type Breaker struct {
+ errorThreshold, successThreshold int
+ timeout time.Duration
+
+ lock sync.Mutex
+ state uint32
+ errors, successes int
+ lastError time.Time
+}
+
+// New constructs a new circuit-breaker that starts closed.
+// From closed, the breaker opens if "errorThreshold" errors are seen
+// without an error-free period of at least "timeout". From open, the
+// breaker transitions to half-open after "timeout". From half-open, the
+// breaker closes after "successThreshold" consecutive successes, or opens
+// again on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+ return &Breaker{
+ errorThreshold: errorThreshold,
+ successThreshold: successThreshold,
+ timeout: timeout,
+ }
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+ state := atomic.LoadUint32(&b.state)
+
+ if state == open {
+ return ErrBreakerOpen
+ }
+
+ return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
+func (b *Breaker) Go(work func() error) error {
+ state := atomic.LoadUint32(&b.state)
+
+ if state == open {
+ return ErrBreakerOpen
+ }
+
+ // errcheck complains about ignoring the error return value, but
+ // that's on purpose; if you want an error from a goroutine you have to
+ // get it over a channel or something
+ go b.doWork(state, work)
+
+ return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+ var panicValue interface{}
+
+ result := func() error {
+ defer func() {
+ panicValue = recover()
+ }()
+ return work()
+ }()
+
+ if result == nil && panicValue == nil && state == closed {
+ // short-circuit the normal, success path without contending
+ // on the lock
+ return nil
+ }
+
+ // oh well, I guess we have to contend on the lock
+ b.processResult(result, panicValue)
+
+ if panicValue != nil {
+ // as close as Go lets us come to a "rethrow", although unfortunately
+ // we lose the original panicking location
+ panic(panicValue)
+ }
+
+ return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if result == nil && panicValue == nil {
+ if b.state == halfOpen {
+ b.successes++
+ if b.successes == b.successThreshold {
+ b.closeBreaker()
+ }
+ }
+ } else {
+ if b.errors > 0 {
+ expiry := b.lastError.Add(b.timeout)
+ if time.Now().After(expiry) {
+ b.errors = 0
+ }
+ }
+
+ switch b.state {
+ case closed:
+ b.errors++
+ if b.errors == b.errorThreshold {
+ b.openBreaker()
+ } else {
+ b.lastError = time.Now()
+ }
+ case halfOpen:
+ b.openBreaker()
+ }
+ }
+}
+
+func (b *Breaker) openBreaker() {
+ b.changeState(open)
+ go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+ b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+ time.Sleep(b.timeout)
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+ b.errors = 0
+ b.successes = 0
+ atomic.StoreUint32(&b.state, newState)
+}
diff --git a/Godeps/_workspace/src/github.com/eapache/queue/.gitignore b/Godeps/_workspace/src/github.com/eapache/queue/.gitignore
new file mode 100644
index 0000000..8365624
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/queue/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml b/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml
new file mode 100644
index 0000000..235a40a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+sudo: false
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
diff --git a/Godeps/_workspace/src/github.com/eapache/queue/LICENSE b/Godeps/_workspace/src/github.com/eapache/queue/LICENSE
new file mode 100644
index 0000000..d5f36db
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/queue/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/eapache/queue/README.md b/Godeps/_workspace/src/github.com/eapache/queue/README.md
new file mode 100644
index 0000000..8e78233
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/queue/README.md
@@ -0,0 +1,16 @@
+Queue
+=====
+
+[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue)
+[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is in part because it is *not* thread-safe.
+
+Follows semantic versioning using https://gopkg.in/ - import from
+[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
+for guaranteed API stability.
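+
+A minimal sketch of the API defined in `queue.go`:
+
+```go
+q := queue.New()
+q.Add("hello")
+q.Add("world")
+fmt.Println(q.Length()) // 2
+fmt.Println(q.Peek())   // "hello"
+q.Remove()
+fmt.Println(q.Peek())   // "world"
+```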
diff --git a/Godeps/_workspace/src/github.com/eapache/queue/queue.go b/Godeps/_workspace/src/github.com/eapache/queue/queue.go
new file mode 100644
index 0000000..2dc8d93
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/eapache/queue/queue.go
@@ -0,0 +1,88 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+ buf []interface{}
+ head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+ return &Queue{
+ buf: make([]interface{}, minQueueLen),
+ }
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+ return q.count
+}
+
+// resize resizes the queue to fit exactly twice its current contents.
+// This can result in shrinking if the queue is less than half-full.
+func (q *Queue) resize() {
+ newBuf := make([]interface{}, q.count*2)
+
+ if q.tail > q.head {
+ copy(newBuf, q.buf[q.head:q.tail])
+ } else {
+ n := copy(newBuf, q.buf[q.head:])
+ copy(newBuf[n:], q.buf[:q.tail])
+ }
+
+ q.head = 0
+ q.tail = q.count
+ q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+ if q.count == len(q.buf) {
+ q.resize()
+ }
+
+ q.buf[q.tail] = elem
+ q.tail = (q.tail + 1) % len(q.buf)
+ q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+ if q.count <= 0 {
+ panic("queue: Peek() called on empty queue")
+ }
+ return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic.
+func (q *Queue) Get(i int) interface{} {
+ if i < 0 || i >= q.count {
+ panic("queue: Get() called with index out of range")
+ }
+ return q.buf[(q.head+i)%len(q.buf)]
+}
+
+// Remove removes the element from the front of the queue. If you actually
+// want the element, call Peek first. This call panics if the queue is empty.
+func (q *Queue) Remove() {
+ if q.count <= 0 {
+ panic("queue: Remove() called on empty queue")
+ }
+ q.buf[q.head] = nil
+ q.head = (q.head + 1) % len(q.buf)
+ q.count--
+ if len(q.buf) > minQueueLen && q.count*4 == len(q.buf) {
+ q.resize()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE
new file mode 100644
index 0000000..5782c72
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2014, Elazar Leibovich
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md
new file mode 100644
index 0000000..27ee48f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md
@@ -0,0 +1,46 @@
+# go-bindata-assetfs
+
+Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.
+
+[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)
+
+### Installation
+
+Install with
+
+ $ go get github.com/jteeuwen/go-bindata/...
+ $ go get github.com/elazarl/go-bindata-assetfs/...
+
+### Creating embedded data
+
+Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage;
+instead of running `go-bindata`, run `go-bindata-assetfs`.
+
+The tool will create a `bindata_assetfs.go` file, which contains the embedded data.
+
+A typical use case is
+
+ $ go-bindata-assetfs data/...
+
+### Using assetFS in your code
+
+The generated file provides an `assetFS()` function that returns an `http.FileSystem`
+wrapping the embedded files. What you usually want to do is:
+
+ http.Handle("/", http.FileServer(assetFS()))
+
+This would run an HTTP server serving the embedded files.
+
+## Without running the binary tool
+
+You can always just run the `go-bindata` tool, and then use
+
+ import "github.com/elazarl/go-bindata-assetfs"
+ ...
+ http.Handle("/",
+ http.FileServer(
+ &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"}))
+
+to serve files embedded from the `data` directory.
diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go
new file mode 100644
index 0000000..9397e58
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go
@@ -0,0 +1,158 @@
+package assetfs
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "time"
+)
+
+var (
+ defaultFileTimestamp = time.Now()
+)
+
+// FakeFile implements the os.FileInfo interface for a given path and size
+type FakeFile struct {
+ // Path is the path of this file
+ Path string
+ // Dir marks whether the path is a directory
+ Dir bool
+ // Len is the length of the fake file, zero if it is a directory
+ Len int64
+ // Timestamp is the ModTime of this file
+ Timestamp time.Time
+}
+
+func (f *FakeFile) Name() string {
+ _, name := filepath.Split(f.Path)
+ return name
+}
+
+func (f *FakeFile) Mode() os.FileMode {
+ mode := os.FileMode(0644)
+ if f.Dir {
+ return mode | os.ModeDir
+ }
+ return mode
+}
+
+func (f *FakeFile) ModTime() time.Time {
+ return f.Timestamp
+}
+
+func (f *FakeFile) Size() int64 {
+ return f.Len
+}
+
+func (f *FakeFile) IsDir() bool {
+ return f.Mode().IsDir()
+}
+
+func (f *FakeFile) Sys() interface{} {
+ return nil
+}
+
+// AssetFile implements the http.File interface for a non-directory file with content
+type AssetFile struct {
+ *bytes.Reader
+ io.Closer
+ FakeFile
+}
+
+func NewAssetFile(name string, content []byte, timestamp time.Time) *AssetFile {
+ if timestamp.IsZero() {
+ timestamp = defaultFileTimestamp
+ }
+ return &AssetFile{
+ bytes.NewReader(content),
+ ioutil.NopCloser(nil),
+ FakeFile{name, false, int64(len(content)), timestamp}}
+}
+
+func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) {
+ return nil, errors.New("not a directory")
+}
+
+func (f *AssetFile) Size() int64 {
+ return f.FakeFile.Size()
+}
+
+func (f *AssetFile) Stat() (os.FileInfo, error) {
+ return f, nil
+}
+
+// AssetDirectory implements the http.File interface for a directory
+type AssetDirectory struct {
+ AssetFile
+ ChildrenRead int
+ Children []os.FileInfo
+}
+
+func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory {
+ fileinfos := make([]os.FileInfo, 0, len(children))
+ for _, child := range children {
+ _, err := fs.AssetDir(filepath.Join(name, child))
+ fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0, time.Time{}})
+ }
+ return &AssetDirectory{
+ AssetFile{
+ bytes.NewReader(nil),
+ ioutil.NopCloser(nil),
+ FakeFile{name, true, 0, time.Time{}},
+ },
+ 0,
+ fileinfos}
+}
+
+func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) {
+ if count <= 0 {
+ return f.Children, nil
+ }
+ if f.ChildrenRead+count > len(f.Children) {
+ count = len(f.Children) - f.ChildrenRead
+ }
+ rv := f.Children[f.ChildrenRead : f.ChildrenRead+count]
+ f.ChildrenRead += count
+ return rv, nil
+}
+
+func (f *AssetDirectory) Stat() (os.FileInfo, error) {
+ return f, nil
+}
+
+// AssetFS implements http.FileSystem, allowing
+// embedded files to be served by the net/http package.
+type AssetFS struct {
+ // Asset should return the content of the file at path, if it exists
+ Asset func(path string) ([]byte, error)
+ // AssetDir should return the list of files in the path
+ AssetDir func(path string) ([]string, error)
+ // AssetInfo should return the info of the file at path, if it exists
+ AssetInfo func(path string) (os.FileInfo, error)
+ // Prefix is prepended to request paths
+ Prefix string
+}
+
+func (fs *AssetFS) Open(name string) (http.File, error) {
+ name = path.Join(fs.Prefix, name)
+ if len(name) > 0 && name[0] == '/' {
+ name = name[1:]
+ }
+ if b, err := fs.Asset(name); err == nil {
+ timestamp := defaultFileTimestamp
+ if info, err := fs.AssetInfo(name); err == nil {
+ timestamp = info.ModTime()
+ }
+ return NewAssetFile(name, b, timestamp), nil
+ }
+ if children, err := fs.AssetDir(name); err == nil {
+ return NewAssetDirectory(name, children, fs), nil
+ } else {
+ return nil, err
+ }
+}
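+
+// A sketch of the lookup order above: with Prefix "data", a request for
+// "/css/site.css" is resolved as asset "data/css/site.css"; if no asset
+// matches, AssetDir is consulted so directories still work with
+// http.FileServer.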
diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go
new file mode 100644
index 0000000..a664249
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go
@@ -0,0 +1,13 @@
+// Package assetfs allows packages to serve static content embedded
+// with the go-bindata tool through the standard net/http package.
+//
+// See https://github.com/jteeuwen/go-bindata for more information
+// about embedding binary data with go-bindata.
+//
+// Usage example, after running
+// $ go-bindata data/...
+// use:
+// http.Handle("/",
+// http.FileServer(
+// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
+package assetfs
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/.gitignore b/Godeps/_workspace/src/github.com/flosch/pongo2/.gitignore
new file mode 100644
index 0000000..37eaf44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/.gitignore
@@ -0,0 +1,40 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.idea
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.project
+EBNF.txt
+test1.tpl
+pongo2_internal_test.go
+tpl-error.out
+/count.out
+/cover.out
+*.swp
+*.iml
+/cpu.out
+/mem.out
+/pongo2.test
+*.error
+/profile
+/coverage.out
+/pongo2_internal_test.ignore
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/.travis.yml b/Godeps/_workspace/src/github.com/flosch/pongo2/.travis.yml
new file mode 100644
index 0000000..18971e1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.4
+ - tip
+install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - go get gopkg.in/check.v1
+script:
+ - go test -v -covermode=count -coverprofile=coverage.out -bench . -cpu 1,4
+ - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN || true'
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/AUTHORS b/Godeps/_workspace/src/github.com/flosch/pongo2/AUTHORS
new file mode 100644
index 0000000..b552df4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/AUTHORS
@@ -0,0 +1,10 @@
+Main author and maintainer of pongo2:
+
+* Florian Schlachter
+
+Contributors (in no specific order):
+
+* @romanoaugusto88
+* @vitalbh
+
+Feel free to add yourself to the list or to modify your entry if you've made a contribution.
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/LICENSE b/Godeps/_workspace/src/github.com/flosch/pongo2/LICENSE
new file mode 100644
index 0000000..e876f86
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2014 Florian Schlachter
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/README.md b/Godeps/_workspace/src/github.com/flosch/pongo2/README.md
new file mode 100644
index 0000000..609adab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/README.md
@@ -0,0 +1,253 @@
+# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
+
+[![Join the chat at https://gitter.im/flosch/pongo2](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/flosch/pongo2)
+[![GoDoc](https://godoc.org/github.com/flosch/pongo2?status.svg)](https://godoc.org/github.com/flosch/pongo2)
+[![Build Status](https://travis-ci.org/flosch/pongo2.svg?branch=master)](https://travis-ci.org/flosch/pongo2)
+[![Coverage Status](https://coveralls.io/repos/flosch/pongo2/badge.svg?branch=master)](https://coveralls.io/r/flosch/pongo2?branch=master)
+[![gratipay](http://img.shields.io/badge/gratipay-support%20pongo-brightgreen.svg)](https://gratipay.com/flosch/)
+[![Bountysource](https://www.bountysource.com/badge/tracker?tracker_id=3654947)](https://www.bountysource.com/trackers/3654947-pongo2?utm_source=3654947&utm_medium=shield&utm_campaign=TRACKER_BADGE)
+
+pongo2 is the successor of [pongo](https://github.com/flosch/pongo), a Django-syntax-like templating language.
+
+Install/update using `go get` (no dependencies required by pongo2):
+```
+go get -u github.com/flosch/pongo2
+```
+
+Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)). If possible, please use the [playground](https://www.florian-schlachter.de/pongo2/) to create a short test case of what's wrong and include the link to the snippet in your issue.
+
+**New**: [Try pongo2 out in the pongo2 playground.](https://www.florian-schlachter.de/pongo2/)
+
+## First impression of a template
+
+```HTML+Django
+<html><head><title>Our admins and users</title></head>
+{# This is a short example to give you a quick overview of pongo2's syntax. #}
+
+{% macro user_details(user, is_admin=false) %}
+ <div class="user_item">
+
+ <!-- Let's indicate a user's good karma -->
+ <h2 {% if (user.karma >= 40) || (user.karma > calc_avg_karma(userlist)+5) %}
+ class="karma-good"{% endif %}>
+
+ <!-- This will call user.String() automatically if available: -->
+ {{ user }}
+ </h2>
+
+ <!-- Will print a human-readable time duration like "3 weeks ago" -->
+ <p>This user registered {{ user.register_date|naturaltime }}.</p>
+
+ <!-- Let's allow the users to write down their biography using markdown;
+ we will only show the first 15 words as a preview -->
+ <p>The user's biography:</p>
+ <p>{{ user.biography|markdown|truncatewords_html:15 }}
+ <a href="/user/{{ user.id }}/">read more</a></p>
+
+ {% if is_admin %}<p class="admin">This user is an admin!</p>{% endif %}
+ </div>
+{% endmacro %}
+
+<body>
+ <!-- Make use of the macro defined above to avoid repetitive HTML code
+ since we want to use the same code for admins AND members -->
+
+ <h1>Our admins</h1>
+ {% for admin in adminlist %}
+ {{ user_details(admin, true) }}
+ {% endfor %}
+
+ <h1>Our members</h1>
+ {% for user in userlist %}
+ {{ user_details(user) }}
+ {% endfor %}
+</body>
+</html>
+```
+
+## Development status
+
+**Latest stable release**: v3.0 (`go get -u gopkg.in/flosch/pongo2.v3` / [`v3`](https://github.com/flosch/pongo2/tree/v3)-branch) [[read the announcement](https://www.florian-schlachter.de/post/pongo2-v3/)]
+
+**Current development**: v4 (`master`-branch)
+
+*Note*: With the release of pongo2 v4, the v2 branch will be deprecated.
+
+**Deprecated versions** (not supported anymore): v1
+
+| Topic | Status |
+| ------------------------------------ | -------------------------------------------------------------------------------------- |
+| Django version compatibility: | [1.7](https://docs.djangoproject.com/en/1.7/ref/templates/builtins/) |
+| *Missing* (planned) **filters**: | none ([hints](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3)) |
+| *Missing* (planned) **tags**: | none ([hints](https://github.com/flosch/pongo2/blob/master/tags.go#L3)) |
+
+Please also have a look at the [caveats](https://github.com/flosch/pongo2#caveats) and at the [official add-ons](https://github.com/flosch/pongo2#official).
+
+## Features (and new in pongo2)
+
+ * Entirely rewritten from the ground up.
+ * [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
+ * [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
+ * [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
+ * Additional features:
+ * Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
+ * [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
+
+## Recent API changes within pongo2
+
+If you're using the `master`-branch of pongo2, you might be interested in this section. Since pongo2 is still in development (even though there is a first stable release!), there could be (backwards-incompatible) API changes over time. To keep track of these and therefore make it painless for you to adapt your codebase, I'll list them here.
+
+ * Function signature for tag execution changed: it no longer takes a `bytes.Buffer`; instead, `Execute()` functions now take a `TemplateWriter` interface.
+ * Function signature for tag and filter parsing/execution changed (`error` return type changed to `*Error`).
+ * `INodeEvaluator` has been removed and got replaced by `IEvaluator`. You can change your existing tags/filters by simply replacing the interface.
+ * Two new helper functions: [`RenderTemplateFile()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateFile) and [`RenderTemplateString()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateString).
+ * `Template.ExecuteRW()` is now [`Template.ExecuteWriter()`](https://godoc.org/github.com/flosch/pongo2#Template.ExecuteWriter)
+ * `Template.Execute*()` functions now take a `pongo2.Context` directly (no pointer anymore).
+
+## How you can help
+
+ * Write [filters](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3) / [tags](https://github.com/flosch/pongo2/blob/master/tags.go#L4) (see [tutorial](https://www.florian-schlachter.de/post/pongo2/)) by forking pongo2 and sending pull requests (a minimal filter sketch follows this list)
+ * Write/improve code tests (use the following command to see what tests are missing: `go test -v -cover -covermode=count -coverprofile=cover.out && go tool cover -html=cover.out` or have a look at [gocover.io/github.com/flosch/pongo2](http://gocover.io/github.com/flosch/pongo2))
+ * Write/improve template tests (see the `template_tests/` directory)
+ * Write middleware, libraries and websites using pongo2. :-)
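+
+For orientation, here's a minimal sketch of a custom filter (illustrative only; the `reverse` name and `filterReverse` function are hypothetical, while `RegisterFilter`, `AsValue` and the filter signature come straight from pongo2's filter API):
+
+```Go
+package main
+
+import (
+ "fmt"
+
+ "github.com/flosch/pongo2"
+)
+
+func init() {
+ // Register the filter at startup; RegisterFilter panics if the name is already taken.
+ pongo2.RegisterFilter("reverse", filterReverse)
+}
+
+// filterReverse reverses the input string rune by rune.
+func filterReverse(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
+ runes := []rune(in.String())
+ for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 {
+ runes[i], runes[j] = runes[j], runes[i]
+ }
+ return pongo2.AsValue(string(runes)), nil
+}
+
+func main() {
+ tpl := pongo2.Must(pongo2.FromString("{{ name|reverse }}"))
+ out, err := tpl.Execute(pongo2.Context{"name": "pongo2"})
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(out) // Output: 2ognop
+}
+```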
+
+# Documentation
+
+For documentation on how the templating language works, you can [head over to the Django documentation](https://docs.djangoproject.com/en/dev/topics/templates/). pongo2 aims to be compatible with it.
+
+You can access pongo2's API documentation on [godoc](https://godoc.org/github.com/flosch/pongo2).
+
+## Blog post series
+
+ * [pongo2 v3 released](https://www.florian-schlachter.de/post/pongo2-v3/)
+ * [pongo2 v2 released](https://www.florian-schlachter.de/post/pongo2-v2/)
+ * [pongo2 1.0 released](https://www.florian-schlachter.de/post/pongo2-10/) [August 8th 2014]
+ * [pongo2 playground](https://www.florian-schlachter.de/post/pongo2-playground/) [August 1st 2014]
+ * [Release of pongo2 1.0-rc1 + pongo2-addons](https://www.florian-schlachter.de/post/pongo2-10-rc1/) [July 30th 2014]
+ * [Introduction to pongo2 + migration- and "how to write tags/filters"-tutorial.](https://www.florian-schlachter.de/post/pongo2/) [June 29th 2014]
+
+## Caveats
+
+### Filters
+
+ * **date** / **time**: The `date` and `time` filters currently take Go's time- and date-format (not Django's). [Take a look at the format here](http://golang.org/pkg/time/#Time.Format). A short sketch follows this list.
+ * **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
+ * **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
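+
+A short sketch of these Go-format caveats (illustrative; `mydate` is a hypothetical context variable holding a `time.Time`):
+
+```HTML+Django
+{# Go's reference layout is "Mon Jan 2 15:04:05 MST 2006", not Django's format characters: #}
+{{ mydate|date:"2006-01-02 15:04" }}
+{# stringformat takes Go's fmt verbs: #}
+{{ 3.14|stringformat:"pi is %.2f" }}
+```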
+
+### Tags
+
+ * **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
+ * **now**: takes Go's time format (see **date** and **time**-filter).
+
+### Misc
+
+ * **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
+ `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
+
+# Add-ons, libraries and helpers
+
+## Official
+
+ * [ponginae](https://github.com/flosch/ponginae) - A web-framework for Go (using pongo2).
+ * [pongo2-tools](https://github.com/flosch/pongo2-tools) - Official tools and helpers for pongo2
+ * [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they rely on 3rd-party libraries.
+
+## 3rd-party
+
+ * [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
+ * [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
+ * [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
+ * [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](https://github.com/gin-gonic/gin) to use pongo2 templates
+ * [pongo2gin](https://github.com/robvdl/pongo2gin) - alternative renderer for [gin](https://github.com/gin-gonic/gin) to use pongo2 templates
+ * [pongo2-trans](https://github.com/fromYukki/pongo2trans) - `trans`-tag implementation for internationalization
+ * [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
+
+Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
+
+# API-usage examples
+
+Please see the documentation for a full list of provided API methods.
+
+## A tiny example (template string)
+
+```Go
+// Compile the template first (i. e. creating the AST)
+tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+if err != nil {
+ panic(err)
+}
+// Now you can render the template with the given
+// pongo2.Context how often you want to.
+out, err := tpl.Execute(pongo2.Context{"name": "florian"})
+if err != nil {
+ panic(err)
+}
+fmt.Println(out) // Output: Hello Florian!
+```
+
+## Example server-usage (template file)
+
+```Go
+package main
+
+import (
+ "github.com/flosch/pongo2"
+ "net/http"
+)
+
+// Pre-compiling the templates at application startup using the
+// little Must()-helper function (Must() will panic if FromFile()
+// or FromString() returns an error - that's it).
+// It's faster to pre-compile it anywhere at startup and only
+// execute the template later.
+var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
+
+func examplePage(w http.ResponseWriter, r *http.Request) {
+ // Execute the template per HTTP request
+ err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+func main() {
+ http.HandleFunc("/", examplePage)
+ http.ListenAndServe(":8080", nil)
+}
+```
+
+# Benchmark
+
+The benchmarks have been run on my machine (`Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz`) using the command:
+
+ go test -bench . -cpu 1,2,4,8
+
+All benchmarks compile and/or execute (depending on the benchmark) the `template_tests/complex.tpl` template.
+
+The results are:
+
+ BenchmarkExecuteComplexWithSandboxActive 50000 60450 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-2 50000 56998 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-4 50000 60343 ns/op
+ BenchmarkExecuteComplexWithSandboxActive-8 50000 64229 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive 10000 164410 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-2 10000 156682 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-4 10000 164821 ns/op
+ BenchmarkCompileAndExecuteComplexWithSandboxActive-8 10000 171806 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive 50000 60428 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-2 50000 31887 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-4 100000 22810 ns/op
+ BenchmarkParallelExecuteComplexWithSandboxActive-8 100000 18820 ns/op
+ BenchmarkExecuteComplexWithoutSandbox 50000 56942 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-2 50000 56168 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-4 50000 57838 ns/op
+ BenchmarkExecuteComplexWithoutSandbox-8 50000 60539 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox 10000 162086 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-2 10000 159771 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-4 10000 163826 ns/op
+ BenchmarkCompileAndExecuteComplexWithoutSandbox-8 10000 169062 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox 50000 57152 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-2 50000 30276 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-4 100000 22065 ns/op
+ BenchmarkParallelExecuteComplexWithoutSandbox-8 100000 18034 ns/op
+
+Benchmarked on October 2nd 2014.
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/context.go b/Godeps/_workspace/src/github.com/flosch/pongo2/context.go
new file mode 100644
index 0000000..7b728ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/context.go
@@ -0,0 +1,125 @@
+package pongo2
+
+import (
+ "fmt"
+ "regexp"
+)
+
+var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
+
+// A Context type provides constants, variables, instances or functions to a template.
+//
+// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
+// Currently, context["pongo2"] contains the following keys:
+// 1. version: returns the version string
+//
+// Template examples for accessing items from your context:
+// {{ myconstant }}
+// {{ myfunc("test", 42) }}
+// {{ user.name }}
+// {{ pongo2.version }}
+type Context map[string]interface{}
+
+func (c Context) checkForValidIdentifiers() *Error {
+ for k, v := range c {
+ if !reIdentifiers.MatchString(k) {
+ return &Error{
+ Sender: "checkForValidIdentifiers",
+ ErrorMsg: fmt.Sprintf("Context-key '%s' (value: '%+v') is not a valid identifier.", k, v),
+ }
+ }
+ }
+ return nil
+}
+
+// Update updates this context with the key/value-pairs from another context.
+func (c Context) Update(other Context) Context {
+ for k, v := range other {
+ c[k] = v
+ }
+ return c
+}
+
+// ExecutionContext contains all data important for the current rendering state.
+//
+// If you're writing a custom tag, your tag's Execute()-function will
+// have access to the ExecutionContext. This struct stores anything
+// about the current rendering process's Context including
+// the Context provided by the user (field Public).
+// You can safely use the Private context to provide data to the user's
+// template (like a 'forloop'-information). The Shared-context is used
+// to share data between tags. All ExecutionContexts share this context.
+//
+// Please be careful when accessing the Public data.
+// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
+//
+// To create your own execution context within tags, use the
+// NewChildExecutionContext(parent) function.
+type ExecutionContext struct {
+ template *Template
+
+ Autoescape bool
+ Public Context
+ Private Context
+ Shared Context
+}
+
+var pongo2MetaContext = Context{
+ "version": Version,
+}
+
+func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
+ privateCtx := make(Context)
+
+ // Make the pongo2-related funcs/vars available to the context
+ privateCtx["pongo2"] = pongo2MetaContext
+
+ return &ExecutionContext{
+ template: tpl,
+
+ Public: ctx,
+ Private: privateCtx,
+ Autoescape: true,
+ }
+}
+
+func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
+ newctx := &ExecutionContext{
+ template: parent.template,
+
+ Public: parent.Public,
+ Private: make(Context),
+ Autoescape: parent.Autoescape,
+ }
+ newctx.Shared = parent.Shared
+
+ // Copy all existing private items
+ newctx.Private.Update(parent.Private)
+
+ return newctx
+}
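+
+// Illustrative sketch (an assumption, not part of the upstream file): a custom
+// tag's Execute() could derive a child context like this, where tagMyNode and
+// its wrapper field are hypothetical:
+//
+//   func (node *tagMyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+//       privateCtx := NewChildExecutionContext(ctx)
+//       privateCtx.Private["counter"] = 42 // visible only to nodes below this tag
+//       return node.wrapper.Execute(privateCtx, writer)
+//   }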
+
+func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
+ filename := ctx.template.name
+ var line, col int
+ if token != nil {
+ // Take the position information from the token
+ // TODO: Add location if no token is available (from where?)
+ filename = token.Filename
+ line = token.Line
+ col = token.Col
+ }
+ return &Error{
+ Template: ctx.template,
+ Filename: filename,
+ Line: line,
+ Column: col,
+ Token: token,
+ Sender: "execution",
+ ErrorMsg: msg,
+ }
+}
+
+func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
+ ctx.template.set.logf(format, args...)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/doc.go b/Godeps/_workspace/src/github.com/flosch/pongo2/doc.go
new file mode 100644
index 0000000..5a23e2b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/doc.go
@@ -0,0 +1,31 @@
+// A Django-syntax like template-engine
+//
+// Blog posts about pongo2 (including introduction and migration):
+// https://www.florian-schlachter.de/?tag=pongo2
+//
+// Complete documentation on the template language:
+// https://docs.djangoproject.com/en/dev/topics/templates/
+//
+// Try out pongo2 live in the pongo2 playground:
+// https://www.florian-schlachter.de/pongo2/
+//
+// Make sure to read README.md in the repository as well.
+//
+// A tiny example with template strings:
+//
+// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
+//
+// // Compile the template first (i. e. creating the AST)
+// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+// if err != nil {
+// panic(err)
+// }
+// // Now you can render the template with the given
+// // pongo2.Context how often you want to.
+// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
+// if err != nil {
+// panic(err)
+// }
+// fmt.Println(out) // Output: Hello Fred!
+//
+package pongo2
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/error.go b/Godeps/_workspace/src/github.com/flosch/pongo2/error.go
new file mode 100644
index 0000000..80d1147
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/error.go
@@ -0,0 +1,91 @@
+package pongo2
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+)
+
+// The Error type is used to describe an error that occurred during lexing,
+// parsing or execution. If you want to return an error object (for example
+// in your own tag or filter) fill this object with as much information as
+// you have. Make sure "Sender" is always set (if you're returning an error
+// within a filter, set Sender to 'filter:yourfilter'; same goes for tags:
+// 'tag:mytag'). It's okay to only fill in ErrorMsg if you don't have any
+// other details at hand.
+type Error struct {
+ Template *Template
+ Filename string
+ Line int
+ Column int
+ Token *Token
+ Sender string
+ ErrorMsg string
+}
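+
+// An illustrative sketch (assumption, not upstream code): a filter named
+// "myfilter" reporting a problem would typically return
+//
+//   return nil, &Error{
+//       Sender:   "filter:myfilter",
+//       ErrorMsg: "unexpected input",
+//   }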
+
+func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
+ if e.Template == nil {
+ e.Template = template
+ }
+
+ if e.Token == nil {
+ e.Token = t
+ if e.Line <= 0 {
+ e.Line = t.Line
+ e.Column = t.Col
+ }
+ }
+
+ return e
+}
+
+// Error returns a nicely formatted error string.
+func (e *Error) Error() string {
+ s := "[Error"
+ if e.Sender != "" {
+ s += " (where: " + e.Sender + ")"
+ }
+ if e.Filename != "" {
+ s += " in " + e.Filename
+ }
+ if e.Line > 0 {
+ s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
+ if e.Token != nil {
+ s += fmt.Sprintf(" near '%s'", e.Token.Val)
+ }
+ }
+ s += "] "
+ s += e.ErrorMsg
+ return s
+}
+
+// RawLine returns the affected line from the original template, if available.
+func (e *Error) RawLine() (line string, available bool) {
+ if e.Line <= 0 || e.Filename == "" {
+ return "", false
+ }
+
+ filename := e.Filename
+ if e.Template != nil {
+ filename = e.Template.set.resolveFilename(e.Template, e.Filename)
+ }
+ file, err := os.Open(filename)
+ if err != nil {
+ panic(err)
+ }
+ defer func() {
+ err := file.Close()
+ if err != nil {
+ panic(err)
+ }
+ }()
+
+ scanner := bufio.NewScanner(file)
+ l := 0
+ for scanner.Scan() {
+ l++
+ if l == e.Line {
+ return scanner.Text(), true
+ }
+ }
+ return "", false
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/filters.go b/Godeps/_workspace/src/github.com/flosch/pongo2/filters.go
new file mode 100644
index 0000000..13061f3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/filters.go
@@ -0,0 +1,133 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
+
+var filters map[string]FilterFunction
+
+func init() {
+ filters = make(map[string]FilterFunction)
+}
+
+// Registers a new filter. If there's already a filter with the same
+// name, RegisterFilter will panic. You usually want to call this
+// function in the filter's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterFilter(name string, fn FilterFunction) {
+ _, existing := filters[name]
+ if existing {
+ panic(fmt.Sprintf("Filter with name '%s' is already registered.", name))
+ }
+ filters[name] = fn
+}
+
+// Replaces an already registered filter with a new implementation. Use this
+// function with caution since it allows you to change existing filter behaviour.
+func ReplaceFilter(name string, fn FilterFunction) {
+ _, existing := filters[name]
+ if !existing {
+ panic(fmt.Sprintf("Filter with name '%s' does not exist (therefore cannot be overridden).", name))
+ }
+ filters[name] = fn
+}
+
+// Like ApplyFilter, but panics on an error
+func MustApplyFilter(name string, value *Value, param *Value) *Value {
+ val, err := ApplyFilter(name, value, param)
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
+
+// Applies a filter to a given value using the given parameters. Returns a *pongo2.Value or an error.
+func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
+ fn, existing := filters[name]
+ if !existing {
+ return nil, &Error{
+ Sender: "applyfilter",
+ ErrorMsg: fmt.Sprintf("Filter with name '%s' not found.", name),
+ }
+ }
+
+ // Make sure param is a *Value
+ if param == nil {
+ param = AsValue(nil)
+ }
+
+ return fn(value, param)
+}
+
+type filterCall struct {
+ token *Token
+
+ name string
+ parameter IEvaluator
+
+ filterFunc FilterFunction
+}
+
+func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
+ var param *Value
+ var err *Error
+
+ if fc.parameter != nil {
+ param, err = fc.parameter.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ param = AsValue(nil)
+ }
+
+ filteredValue, err := fc.filterFunc(v, param)
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
+ }
+ return filteredValue, nil
+}
+
+// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
+func (p *Parser) parseFilter() (*filterCall, *Error) {
+ identToken := p.MatchType(TokenIdentifier)
+
+ // Check filter ident
+ if identToken == nil {
+ return nil, p.Error("Filter name must be an identifier.", nil)
+ }
+
+ filter := &filterCall{
+ token: identToken,
+ name: identToken.Val,
+ }
+
+ // Get the appropriate filter function and bind it
+ filterFn, exists := filters[identToken.Val]
+ if !exists {
+ return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
+ }
+
+ filter.filterFunc = filterFn
+
+ // Check for filter-argument (2 tokens needed: ':' ARG)
+ if p.Match(TokenSymbol, ":") != nil {
+ if p.Peek(TokenSymbol, "}}") != nil {
+ return nil, p.Error("Filter parameter required after ':'.", nil)
+ }
+
+ // Get filter argument expression
+ v, err := p.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ filter.parameter = v
+ }
+
+ return filter, nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/filters_builtin.go b/Godeps/_workspace/src/github.com/flosch/pongo2/filters_builtin.go
new file mode 100644
index 0000000..a267fa2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/filters_builtin.go
@@ -0,0 +1,901 @@
+package pongo2
+
+/* Filters that are provided through github.com/flosch/pongo2-addons:
+ ------------------------------------------------------------------
+
+ filesizeformat
+ slugify
+ timesince
+ timeuntil
+
+ Filters that won't be added:
+ ----------------------------
+
+ get_static_prefix (reason: web-framework specific)
+ pprint (reason: python-specific)
+ static (reason: web-framework specific)
+
+ Reconsideration (not implemented yet):
+ --------------------------------------
+
+ force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
+ safeseq (reason: same reason as `force_escape`)
+ unordered_list (python-specific; not sure whether needed or not)
+ dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
+ dictsortreversed (see dictsort)
+*/
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+func init() {
+ rand.Seed(time.Now().Unix())
+
+ RegisterFilter("escape", filterEscape)
+ RegisterFilter("safe", filterSafe)
+ RegisterFilter("escapejs", filterEscapejs)
+
+ RegisterFilter("add", filterAdd)
+ RegisterFilter("addslashes", filterAddslashes)
+ RegisterFilter("capfirst", filterCapfirst)
+ RegisterFilter("center", filterCenter)
+ RegisterFilter("cut", filterCut)
+ RegisterFilter("date", filterDate)
+ RegisterFilter("default", filterDefault)
+ RegisterFilter("default_if_none", filterDefaultIfNone)
+ RegisterFilter("divisibleby", filterDivisibleby)
+ RegisterFilter("first", filterFirst)
+ RegisterFilter("floatformat", filterFloatformat)
+ RegisterFilter("get_digit", filterGetdigit)
+ RegisterFilter("iriencode", filterIriencode)
+ RegisterFilter("join", filterJoin)
+ RegisterFilter("last", filterLast)
+ RegisterFilter("length", filterLength)
+ RegisterFilter("length_is", filterLengthis)
+ RegisterFilter("linebreaks", filterLinebreaks)
+ RegisterFilter("linebreaksbr", filterLinebreaksbr)
+ RegisterFilter("linenumbers", filterLinenumbers)
+ RegisterFilter("ljust", filterLjust)
+ RegisterFilter("lower", filterLower)
+ RegisterFilter("make_list", filterMakelist)
+ RegisterFilter("phone2numeric", filterPhone2numeric)
+ RegisterFilter("pluralize", filterPluralize)
+ RegisterFilter("random", filterRandom)
+ RegisterFilter("removetags", filterRemovetags)
+ RegisterFilter("rjust", filterRjust)
+ RegisterFilter("slice", filterSlice)
+ RegisterFilter("stringformat", filterStringformat)
+ RegisterFilter("striptags", filterStriptags)
+ RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
+ RegisterFilter("title", filterTitle)
+ RegisterFilter("truncatechars", filterTruncatechars)
+ RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
+ RegisterFilter("truncatewords", filterTruncatewords)
+ RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
+ RegisterFilter("upper", filterUpper)
+ RegisterFilter("urlencode", filterUrlencode)
+ RegisterFilter("urlize", filterUrlize)
+ RegisterFilter("urlizetrunc", filterUrlizetrunc)
+ RegisterFilter("wordcount", filterWordcount)
+ RegisterFilter("wordwrap", filterWordwrap)
+ RegisterFilter("yesno", filterYesno)
+
+ RegisterFilter("float", filterFloat) // pongo-specific
+ RegisterFilter("integer", filterInteger) // pongo-specific
+}
+
+func filterTruncatecharsHelper(s string, newLen int) string {
+ runes := []rune(s)
+ if newLen < len(runes) {
+ if newLen >= 3 {
+ return fmt.Sprintf("%s...", string(runes[:newLen-3]))
+ }
+ // Not enough space for the ellipsis
+ return string(runes[:newLen])
+ }
+ return string(runes)
+}
+
+func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
+ vLen := len(value)
+ var tagStack []string
+ idx := 0
+
+ for idx < vLen && !cond() {
+ c, s := utf8.DecodeRuneInString(value[idx:])
+ if c == utf8.RuneError {
+ idx += s
+ continue
+ }
+
+ if c == '<' {
+ newOutput.WriteRune(c)
+ idx += s // consume "<"
+
+ if idx+1 < vLen {
+ if value[idx] == '/' {
+ // Close tag
+
+ newOutput.WriteString("/")
+
+ tag := ""
+ idx++ // consume "/"
+
+ for idx < vLen {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ // End of tag found
+ if c2 == '>' {
+ idx++ // consume ">"
+ break
+ }
+ tag += string(c2)
+ idx += size2
+ }
+
+ if len(tagStack) > 0 {
+ // Ideally, the close tag is at the top of the tag stack.
+ // In malformed HTML it may not be, so iterate through the stack and remove the tag
+ for i := len(tagStack) - 1; i >= 0; i-- {
+ if tagStack[i] == tag {
+ // Found the tag
+ tagStack[i] = tagStack[len(tagStack)-1]
+ tagStack = tagStack[:len(tagStack)-1]
+ break
+ }
+ }
+ }
+
+ newOutput.WriteString(tag)
+ newOutput.WriteString(">")
+ } else {
+ // Open tag
+
+ tag := ""
+
+ params := false
+ for idx < vLen {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ newOutput.WriteRune(c2)
+
+ // End of tag found
+ if c2 == '>' {
+ idx++ // consume ">"
+ break
+ }
+
+ if !params {
+ if c2 == ' ' {
+ params = true
+ } else {
+ tag += string(c2)
+ }
+ }
+
+ idx += size2
+ }
+
+ // Add tag to stack
+ tagStack = append(tagStack, tag)
+ }
+ }
+ } else {
+ idx = fn(c, s, idx)
+ }
+ }
+
+ finalize()
+
+ for i := len(tagStack) - 1; i >= 0; i-- {
+ tag := tagStack[i]
+ // Close everything from the regular tag stack
+ newOutput.WriteString(fmt.Sprintf("</%s>", tag))
+ }
+}
+
+func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ newLen := param.Integer()
+ return AsValue(filterTruncatecharsHelper(s, newLen)), nil
+}
+
+func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer()-3, 0)
+
+ newOutput := bytes.NewBuffer(nil)
+
+ textcounter := 0
+
+ filterTruncateHTMLHelper(value, newOutput, func() bool {
+ return textcounter >= newLen
+ }, func(c rune, s int, idx int) int {
+ textcounter++
+ newOutput.WriteRune(c)
+
+ return idx + s
+ }, func() {
+ if textcounter >= newLen && textcounter < len(value) {
+ newOutput.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(newOutput.String()), nil
+}
+
+func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
+ words := strings.Fields(in.String())
+ n := param.Integer()
+ if n <= 0 {
+ return AsValue(""), nil
+ }
+ nlen := min(len(words), n)
+ out := make([]string, 0, nlen)
+ for i := 0; i < nlen; i++ {
+ out = append(out, words[i])
+ }
+
+ if n < len(words) {
+ out = append(out, "...")
+ }
+
+ return AsValue(strings.Join(out, " ")), nil
+}
+
+func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer(), 0)
+
+ newOutput := bytes.NewBuffer(nil)
+
+ wordcounter := 0
+
+ filterTruncateHTMLHelper(value, newOutput, func() bool {
+ return wordcounter >= newLen
+ }, func(_ rune, _ int, idx int) int {
+ // Get next word
+ wordFound := false
+
+ for idx < len(value) {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ if c2 == '<' {
+ // HTML tag start, don't consume it
+ return idx
+ }
+
+ newOutput.WriteRune(c2)
+ idx += size2
+
+ if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
+ // Word ends here, stop capturing it now
+ break
+ } else {
+ wordFound = true
+ }
+ }
+
+ if wordFound {
+ wordcounter++
+ }
+
+ return idx
+ }, func() {
+ if wordcounter >= newLen {
+ newOutput.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(newOutput.String()), nil
+}
+
+func filterEscape(in *Value, param *Value) (*Value, *Error) {
+ output := strings.Replace(in.String(), "&", "&amp;", -1)
+ output = strings.Replace(output, ">", "&gt;", -1)
+ output = strings.Replace(output, "<", "&lt;", -1)
+ output = strings.Replace(output, "\"", "&quot;", -1)
+ output = strings.Replace(output, "'", "&#39;", -1)
+ return AsValue(output), nil
+}
+
+func filterSafe(in *Value, param *Value) (*Value, *Error) {
+ return in, nil // nothing to do here, just to keep track of the safe application
+}
+
+func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
+ sin := in.String()
+
+ var b bytes.Buffer
+
+ idx := 0
+ for idx < len(sin) {
+ c, size := utf8.DecodeRuneInString(sin[idx:])
+ if c == utf8.RuneError {
+ idx += size
+ continue
+ }
+
+ if c == '\\' {
+ // Escape seq?
+ if idx+1 < len(sin) {
+ switch sin[idx+1] {
+ case 'r':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
+ idx += 2
+ continue
+ case 'n':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
+ idx += 2
+ continue
+ /*case '\'':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
+ idx += 2
+ continue
+ case '"':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
+ idx += 2
+ continue*/
+ }
+ }
+ }
+
+ if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
+ b.WriteRune(c)
+ } else {
+ b.WriteString(fmt.Sprintf(`\u%04X`, c))
+ }
+
+ idx += size
+ }
+
+ return AsValue(b.String()), nil
+}
+
+func filterAdd(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNumber() && param.IsNumber() {
+ if in.IsFloat() || param.IsFloat() {
+ return AsValue(in.Float() + param.Float()), nil
+ }
+ return AsValue(in.Integer() + param.Integer()), nil
+ }
+ // If in/param is not a number, we're relying on the
+ // Value's String() conversion and just add them both together
+ return AsValue(in.String() + param.String()), nil
+}
+
+func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
+ output := strings.Replace(in.String(), "\\", "\\\\", -1)
+ output = strings.Replace(output, "\"", "\\\"", -1)
+ output = strings.Replace(output, "'", "\\'", -1)
+ return AsValue(output), nil
+}
+
+func filterCut(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
+}
+
+func filterLength(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Len()), nil
+}
+
+func filterLengthis(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Len() == param.Integer()), nil
+}
+
+func filterDefault(in *Value, param *Value) (*Value, *Error) {
+ if !in.IsTrue() {
+ return param, nil
+ }
+ return in, nil
+}
+
+func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNil() {
+ return param, nil
+ }
+ return in, nil
+}
+
+func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
+ if param.Integer() == 0 {
+ return AsValue(false), nil
+ }
+ return AsValue(in.Integer()%param.Integer() == 0), nil
+}
+
+func filterFirst(in *Value, param *Value) (*Value, *Error) {
+ if in.CanSlice() && in.Len() > 0 {
+ return in.Index(0), nil
+ }
+ return AsValue(""), nil
+}
+
+func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
+ val := in.Float()
+
+ decimals := -1
+ if !param.IsNil() {
+ // Any argument provided?
+ decimals = param.Integer()
+ }
+
+ // if the argument is not a number (e.g. empty), the default
+ // behaviour is to trim the result
+ trim := !param.IsNumber()
+
+ if decimals <= 0 {
+ // argument is negative or zero, so we
+ // want the output being trimmed
+ decimals = -decimals
+ trim = true
+ }
+
+ if trim {
+ // Remove zeroes
+ if float64(int(val)) == val {
+ return AsValue(in.Integer()), nil
+ }
+ }
+
+ return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
+}
+
+func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
+ i := param.Integer()
+ l := len(in.String()) // do NOT use in.Len() here!
+ if i <= 0 || i > l {
+ return in, nil
+ }
+ return AsValue(in.String()[l-i] - 48), nil
+}
+
+const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
+
+func filterIriencode(in *Value, param *Value) (*Value, *Error) {
+ var b bytes.Buffer
+
+ sin := in.String()
+ for _, r := range sin {
+ if strings.IndexRune(filterIRIChars, r) >= 0 {
+ b.WriteRune(r)
+ } else {
+ b.WriteString(url.QueryEscape(string(r)))
+ }
+ }
+
+ return AsValue(b.String()), nil
+}
+
+func filterJoin(in *Value, param *Value) (*Value, *Error) {
+ if !in.CanSlice() {
+ return in, nil
+ }
+ sep := param.String()
+ sl := make([]string, 0, in.Len())
+ for i := 0; i < in.Len(); i++ {
+ sl = append(sl, in.Index(i).String())
+ }
+ return AsValue(strings.Join(sl, sep)), nil
+}
+
+func filterLast(in *Value, param *Value) (*Value, *Error) {
+ if in.CanSlice() && in.Len() > 0 {
+ return in.Index(in.Len() - 1), nil
+ }
+ return AsValue(""), nil
+}
+
+func filterUpper(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.ToUpper(in.String())), nil
+}
+
+func filterLower(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.ToLower(in.String())), nil
+}
+
+func filterMakelist(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ result := make([]string, 0, len(s))
+ for _, c := range s {
+ result = append(result, string(c))
+ }
+ return AsValue(result), nil
+}
+
+func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
+ if in.Len() <= 0 {
+ return AsValue(""), nil
+ }
+ t := in.String()
+ r, size := utf8.DecodeRuneInString(t)
+ return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
+}
+
+func filterCenter(in *Value, param *Value) (*Value, *Error) {
+ width := param.Integer()
+ slen := in.Len()
+ if width <= slen {
+ return in, nil
+ }
+
+ spaces := width - slen
+ left := spaces/2 + spaces%2
+ right := spaces / 2
+
+ return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
+ in.String(), strings.Repeat(" ", right))), nil
+}
+
+func filterDate(in *Value, param *Value) (*Value, *Error) {
+ t, isTime := in.Interface().(time.Time)
+ if !isTime {
+ return nil, &Error{
+ Sender: "filter:date",
+ ErrorMsg: "Filter input argument must be of type 'time.Time'.",
+ }
+ }
+ return AsValue(t.Format(param.String())), nil
+}
+
+func filterFloat(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Float()), nil
+}
+
+func filterInteger(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Integer()), nil
+}
+
+func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
+ if in.Len() == 0 {
+ return in, nil
+ }
+
+ var b bytes.Buffer
+
+ // Newline = <br />
+ // Double newline = <p>...</p>
+ lines := strings.Split(in.String(), "\n")
+ lenlines := len(lines)
+
+ opened := false
+
+ for idx, line := range lines {
+
+ if !opened {
+ b.WriteString("")
+ opened = true
+ }
+
+ b.WriteString(line)
+
+ if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
+ // We've not reached the end
+ if strings.TrimSpace(lines[idx+1]) == "" {
+ // Next line is empty
+ if opened {
+ b.WriteString("
")
+ opened = false
+ }
+ } else {
+ b.WriteString(" ")
+ }
+ }
+ }
+
+ if opened {
+ b.WriteString("
")
+ }
+
+ return AsValue(b.String()), nil
+}
+
+func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.Replace(in.String(), "\n", "<br />", -1)), nil
+}
+
+func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
+ lines := strings.Split(in.String(), "\n")
+ output := make([]string, 0, len(lines))
+ for idx, line := range lines {
+ output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
+ }
+ return AsValue(strings.Join(output, "\n")), nil
+}
+
+func filterLjust(in *Value, param *Value) (*Value, *Error) {
+ times := param.Integer() - in.Len()
+ if times < 0 {
+ times = 0
+ }
+ return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
+}
+
+func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(url.QueryEscape(in.String())), nil
+}
+
+// TODO: This regexp could use some work
+var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
+var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
+
+func filterUrlizeHelper(input string, autoescape bool, trunc int) string {
+ sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
+ var prefix string
+ var suffix string
+ if strings.HasPrefix(raw_url, " ") {
+ prefix = " "
+ }
+ if strings.HasSuffix(raw_url, " ") {
+ suffix = " "
+ }
+
+ raw_url = strings.TrimSpace(raw_url)
+
+ t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
+ if err != nil {
+ panic(err)
+ }
+ url := t.String()
+
+ if !strings.HasPrefix(url, "http") {
+ url = fmt.Sprintf("http://%s", url)
+ }
+
+ title := raw_url
+
+ if trunc > 3 && len(title) > trunc {
+ title = fmt.Sprintf("%s...", title[:trunc-3])
+ }
+
+ if autoescape {
+ t, err := ApplyFilter("escape", AsValue(title), nil)
+ if err != nil {
+ panic(err)
+ }
+ title = t.String()
+ }
+
+ return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
+ })
+
+ sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
+
+ title := mail
+
+ if trunc > 3 && len(title) > trunc {
+ title = fmt.Sprintf("%s...", title[:trunc-3])
+ }
+
+ return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
+ })
+
+ return sout
+}
+
+func filterUrlize(in *Value, param *Value) (*Value, *Error) {
+ autoescape := true
+ if param.IsBool() {
+ autoescape = param.Bool()
+ }
+
+ return AsValue(filterUrlizeHelper(in.String(), autoescape, -1)), nil
+}
+
+func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(filterUrlizeHelper(in.String(), true, param.Integer())), nil
+}
+
+func filterStringformat(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
+}
+
+var reStriptags = regexp.MustCompile("<[^>]*?>")
+
+func filterStriptags(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+
+ // Strip all tags
+ s = reStriptags.ReplaceAllString(s, "")
+
+ return AsValue(strings.TrimSpace(s)), nil
+}
+
+// https://en.wikipedia.org/wiki/Phoneword
+var filterPhone2numericMap = map[string]string{
+ "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
+ "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
+ "w": "9", "x": "9", "y": "9", "z": "9",
+}
+
+func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
+ sin := in.String()
+ for k, v := range filterPhone2numericMap {
+ sin = strings.Replace(sin, k, v, -1)
+ sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
+ }
+ return AsValue(sin), nil
+}
+
+func filterPluralize(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNumber() {
+ // Works only on numbers
+ if param.Len() > 0 {
+ endings := strings.Split(param.String(), ",")
+ if len(endings) > 2 {
+ return nil, &Error{
+ Sender: "filter:pluralize",
+ ErrorMsg: "You cannot pass more than 2 arguments to filter 'pluralize'.",
+ }
+ }
+ if len(endings) == 1 {
+ // 1 argument
+ if in.Integer() != 1 {
+ return AsValue(endings[0]), nil
+ }
+ } else {
+ if in.Integer() != 1 {
+ // 2 arguments
+ return AsValue(endings[1]), nil
+ }
+ return AsValue(endings[0]), nil
+ }
+ } else {
+ if in.Integer() != 1 {
+ // return default 's'
+ return AsValue("s"), nil
+ }
+ }
+
+ return AsValue(""), nil
+ }
+ return nil, &Error{
+ Sender: "filter:pluralize",
+ ErrorMsg: "Filter 'pluralize' does only work on numbers.",
+ }
+}
+
+func filterRandom(in *Value, param *Value) (*Value, *Error) {
+ if !in.CanSlice() || in.Len() <= 0 {
+ return in, nil
+ }
+ i := rand.Intn(in.Len())
+ return in.Index(i), nil
+}
+
+func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ tags := strings.Split(param.String(), ",")
+
+ // Strip only specific tags
+ for _, tag := range tags {
+ re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
+ s = re.ReplaceAllString(s, "")
+ }
+
+ return AsValue(strings.TrimSpace(s)), nil
+}
+
+func filterRjust(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
+}
+
+func filterSlice(in *Value, param *Value) (*Value, *Error) {
+ comp := strings.Split(param.String(), ":")
+ if len(comp) != 2 {
+ return nil, &Error{
+ Sender: "filter:slice",
+ ErrorMsg: "Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]",
+ }
+ }
+
+ if !in.CanSlice() {
+ return in, nil
+ }
+
+ from := AsValue(comp[0]).Integer()
+ to := in.Len()
+
+ if from > to {
+ from = to
+ }
+
+ vto := AsValue(comp[1]).Integer()
+ if vto >= from && vto <= in.Len() {
+ to = vto
+ }
+
+ return in.Slice(from, to), nil
+}
+
+func filterTitle(in *Value, param *Value) (*Value, *Error) {
+ if !in.IsString() {
+ return AsValue(""), nil
+ }
+ return AsValue(strings.Title(strings.ToLower(in.String()))), nil
+}
+
+func filterWordcount(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(len(strings.Fields(in.String()))), nil
+}
+
+func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
+ words := strings.Fields(in.String())
+ wordsLen := len(words)
+ wrapAt := param.Integer()
+ if wrapAt <= 0 {
+ return in, nil
+ }
+
+ linecount := (wordsLen + wrapAt - 1) / wrapAt // ceil(wordsLen/wrapAt); the old formula over-counted lines and could slice out of range
+ lines := make([]string, 0, linecount)
+ for i := 0; i < linecount; i++ {
+ lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
+ }
+ return AsValue(strings.Join(lines, "\n")), nil
+}
+
+func filterYesno(in *Value, param *Value) (*Value, *Error) {
+ choices := map[int]string{
+ 0: "yes",
+ 1: "no",
+ 2: "maybe",
+ }
+ paramString := param.String()
+ customChoices := strings.Split(paramString, ",")
+ if len(paramString) > 0 {
+ if len(customChoices) > 3 {
+ return nil, &Error{
+ Sender: "filter:yesno",
+ ErrorMsg: fmt.Sprintf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
+ }
+ }
+ if len(customChoices) < 2 {
+ return nil, &Error{
+ Sender: "filter:yesno",
+ ErrorMsg: fmt.Sprintf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
+ }
+ }
+
+ // Map to the options now
+ choices[0] = customChoices[0]
+ choices[1] = customChoices[1]
+ if len(customChoices) == 3 {
+ choices[2] = customChoices[2]
+ }
+ }
+
+ // maybe
+ if in.IsNil() {
+ return AsValue(choices[2]), nil
+ }
+
+ // yes
+ if in.IsTrue() {
+ return AsValue(choices[0]), nil
+ }
+
+ // no
+ return AsValue(choices[1]), nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/helpers.go b/Godeps/_workspace/src/github.com/flosch/pongo2/helpers.go
new file mode 100644
index 0000000..880dbc0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/helpers.go
@@ -0,0 +1,15 @@
+package pongo2
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/lexer.go b/Godeps/_workspace/src/github.com/flosch/pongo2/lexer.go
new file mode 100644
index 0000000..1698e41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/lexer.go
@@ -0,0 +1,421 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ TokenError = iota
+ EOF
+
+ TokenHTML
+
+ TokenKeyword
+ TokenIdentifier
+ TokenString
+ TokenNumber
+ TokenSymbol
+)
+
+var (
+ tokenSpaceChars = " \n\r\t"
+ tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
+ tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
+ tokenDigits = "0123456789"
+
+ // Available symbols in pongo2 (within filters/tag)
+ TokenSymbols = []string{
+ // 3-Char symbols
+
+ // 2-Char symbols
+ "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
+
+ // 1-Char symbol
+ "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
+ }
+
+ // Available keywords in pongo2
+ TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
+)
+
+type TokenType int
+type Token struct {
+ Filename string
+ Typ TokenType
+ Val string
+ Line int
+ Col int
+}
+
+type lexerStateFn func() lexerStateFn
+type lexer struct {
+ name string
+ input string
+ start int // start pos of the item
+ pos int // current pos
+ width int // width of last rune
+ tokens []*Token
+ errored bool
+ startline int
+ startcol int
+ line int
+ col int
+
+ inVerbatim bool
+ verbatimName string
+}
+
+func (t *Token) String() string {
+ val := t.Val
+ if len(val) > 1000 {
+ val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:len(val)])
+ }
+
+ typ := ""
+ switch t.Typ {
+ case TokenHTML:
+ typ = "HTML"
+ case TokenError:
+ typ = "Error"
+ case TokenIdentifier:
+ typ = "Identifier"
+ case TokenKeyword:
+ typ = "Keyword"
+ case TokenNumber:
+ typ = "Number"
+ case TokenString:
+ typ = "String"
+ case TokenSymbol:
+ typ = "Symbol"
+ default:
+ typ = "Unknown"
+ }
+
+ return fmt.Sprintf("",
+ typ, t.Typ, val, t.Line, t.Col)
+}
+
+func lex(name string, input string) ([]*Token, *Error) {
+ l := &lexer{
+ name: name,
+ input: input,
+ tokens: make([]*Token, 0, 100),
+ line: 1,
+ col: 1,
+ startline: 1,
+ startcol: 1,
+ }
+ l.run()
+ if l.errored {
+ errtoken := l.tokens[len(l.tokens)-1]
+ return nil, &Error{
+ Filename: name,
+ Line: errtoken.Line,
+ Column: errtoken.Col,
+ Sender: "lexer",
+ ErrorMsg: errtoken.Val,
+ }
+ }
+ return l.tokens, nil
+}
+
+func (l *lexer) value() string {
+ return l.input[l.start:l.pos]
+}
+
+func (l *lexer) length() int {
+ return l.pos - l.start
+}
+
+func (l *lexer) emit(t TokenType) {
+ tok := &Token{
+ Filename: l.name,
+ Typ: t,
+ Val: l.value(),
+ Line: l.startline,
+ Col: l.startcol,
+ }
+
+ if t == TokenString {
+ // Escape sequence \" in strings
+ tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
+ tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
+ }
+
+ l.tokens = append(l.tokens, tok)
+ l.start = l.pos
+ l.startline = l.line
+ l.startcol = l.col
+}
+
+func (l *lexer) next() rune {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return EOF
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = w
+ l.pos += l.width
+ l.col += l.width
+ return r
+}
+
+func (l *lexer) backup() {
+ l.pos -= l.width
+ l.col -= l.width
+}
+
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func (l *lexer) ignore() {
+ l.start = l.pos
+ l.startline = l.line
+ l.startcol = l.col
+}
+
+func (l *lexer) accept(what string) bool {
+ if strings.IndexRune(what, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *lexer) acceptRun(what string) {
+ for strings.IndexRune(what, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
+ t := &Token{
+ Filename: l.name,
+ Typ: TokenError,
+ Val: fmt.Sprintf(format, args...),
+ Line: l.startline,
+ Col: l.startcol,
+ }
+ l.tokens = append(l.tokens, t)
+ l.errored = true
+ l.startline = l.line
+ l.startcol = l.col
+ return nil
+}
+
+func (l *lexer) eof() bool {
+ return l.start >= len(l.input)-1
+}
+
+func (l *lexer) run() {
+ for {
+ // TODO: Support verbatim tag names
+ // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
+ if l.inVerbatim {
+ name := l.verbatimName
+ if name != "" {
+ name += " "
+ }
+ if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ w := len("{% endverbatim %}")
+ l.pos += w
+ l.col += w
+ l.ignore()
+ l.inVerbatim = false
+ }
+ } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ l.inVerbatim = true
+ w := len("{% verbatim %}")
+ l.pos += w
+ l.col += w
+ l.ignore()
+ }
+
+ if !l.inVerbatim {
+ // Ignore single-line comments {# ... #}
+ if strings.HasPrefix(l.input[l.pos:], "{#") {
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+
+ l.pos += 2 // pass '{#'
+ l.col += 2
+
+ for {
+ switch l.peek() {
+ case EOF:
+ l.errorf("Single-line comment not closed.")
+ return
+ case '\n':
+ l.errorf("Newline not permitted in a single-line comment.")
+ return
+ }
+
+ if strings.HasPrefix(l.input[l.pos:], "#}") {
+ l.pos += 2 // pass '#}'
+ l.col += 2
+ break
+ }
+
+ l.next()
+ }
+ l.ignore() // ignore whole comment
+
+ // Comment skipped
+ continue // next token
+ }
+
+ if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
+ strings.HasPrefix(l.input[l.pos:], "{%") { // tag
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ l.tokenize()
+ if l.errored {
+ return
+ }
+ continue
+ }
+ }
+
+ switch l.peek() {
+ case '\n':
+ l.line++
+ l.col = 0
+ }
+ if l.next() == EOF {
+ break
+ }
+ }
+
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+
+ if l.inVerbatim {
+ l.errorf("verbatim-tag not closed, got EOF.")
+ }
+}
+
+func (l *lexer) tokenize() {
+ for state := l.stateCode; state != nil; {
+ state = state()
+ }
+}
+
+func (l *lexer) stateCode() lexerStateFn {
+outer_loop:
+ for {
+ switch {
+ case l.accept(tokenSpaceChars):
+ if l.value() == "\n" {
+ return l.errorf("Newline not allowed within tag/variable.")
+ }
+ l.ignore()
+ continue
+ case l.accept(tokenIdentifierChars):
+ return l.stateIdentifier
+ case l.accept(tokenDigits):
+ return l.stateNumber
+ case l.accept(`"`):
+ return l.stateString
+ }
+
+ // Check for symbol
+ for _, sym := range TokenSymbols {
+ if strings.HasPrefix(l.input[l.start:], sym) {
+ l.pos += len(sym)
+ l.col += l.length()
+ l.emit(TokenSymbol)
+
+ if sym == "%}" || sym == "}}" {
+ // Tag/variable end, return after emit
+ return nil
+ }
+
+ continue outer_loop
+ }
+ }
+
+ if l.pos < len(l.input) {
+ return l.errorf("Unknown character: %q (%d)", l.peek(), l.peek())
+ }
+
+ break
+ }
+
+ // Normal shut down
+ return nil
+}
+
+func (l *lexer) stateIdentifier() lexerStateFn {
+ l.acceptRun(tokenIdentifierChars)
+ l.acceptRun(tokenIdentifierCharsWithDigits)
+ for _, kw := range TokenKeywords {
+ if kw == l.value() {
+ l.emit(TokenKeyword)
+ return l.stateCode
+ }
+ }
+ l.emit(TokenIdentifier)
+ return l.stateCode
+}
+
+func (l *lexer) stateNumber() lexerStateFn {
+ l.acceptRun(tokenDigits)
+ /*
+ Maybe context-sensitive number lexing?
+ * comments.0.Text // first comment
+ * usercomments.1.0 // second user, first comment
+ * if (score >= 8.5) // 8.5 as a number
+
+ if l.peek() == '.' {
+ l.accept(".")
+ if !l.accept(tokenDigits) {
+ return l.errorf("Malformed number.")
+ }
+ l.acceptRun(tokenDigits)
+ }
+ */
+ l.emit(TokenNumber)
+ return l.stateCode
+}
+
+func (l *lexer) stateString() lexerStateFn {
+ l.ignore()
+ l.startcol-- // we're starting the position at the first "
+ for !l.accept(`"`) {
+ switch l.next() {
+ case '\\':
+ // escape sequence
+ switch l.peek() {
+ case '"', '\\':
+ l.next()
+ default:
+ return l.errorf("Unknown escape sequence: \\%c", l.peek())
+ }
+ case EOF:
+ return l.errorf("Unexpected EOF, string not closed.")
+ case '\n':
+ return l.errorf("Newline in string is not allowed.")
+ }
+ }
+ l.backup()
+ l.emit(TokenString)
+
+ l.next()
+ l.ignore()
+
+ return l.stateCode
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/nodes.go b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes.go
new file mode 100644
index 0000000..5b039cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes.go
@@ -0,0 +1,16 @@
+package pongo2
+
+// The root document
+type nodeDocument struct {
+ Nodes []INode
+}
+
+func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range doc.Nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_html.go b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_html.go
new file mode 100644
index 0000000..9680285
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_html.go
@@ -0,0 +1,10 @@
+package pongo2
+
+type nodeHTML struct {
+ token *Token
+}
+
+func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ writer.WriteString(n.token.Val)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_wrapper.go b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_wrapper.go
new file mode 100644
index 0000000..d1bcb8d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/nodes_wrapper.go
@@ -0,0 +1,16 @@
+package pongo2
+
+type NodeWrapper struct {
+ Endtag string
+ nodes []INode
+}
+
+func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range wrapper.nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/parser.go b/Godeps/_workspace/src/github.com/flosch/pongo2/parser.go
new file mode 100644
index 0000000..bed5061
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/parser.go
@@ -0,0 +1,265 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+)
+
+type INode interface {
+ Execute(*ExecutionContext, TemplateWriter) *Error
+}
+
+type IEvaluator interface {
+ INode
+ GetPositionToken() *Token
+ Evaluate(*ExecutionContext) (*Value, *Error)
+ FilterApplied(name string) bool
+}
+
+// The parser provides you a comprehensive and easy tool to
+// work with the template document and arguments provided by
+// the user for your custom tag.
+//
+// The parser works on a token list which will be provided by pongo2.
+// A token is a unit you can work with. Tokens are either of type identifier,
+// string, number, keyword, HTML or symbol.
+//
+// (See Token's documentation for more about tokens)
+type Parser struct {
+ name string
+ idx int
+ tokens []*Token
+ lastToken *Token
+
+ // if the parser parses a template document, here will be
+ // a reference to it (needed to access the template through Tags)
+ template *Template
+}
+
+// Creates a new parser to parse tokens.
+// Used inside pongo2 to parse documents and to provide an easy-to-use
+// parser for tag authors
+func newParser(name string, tokens []*Token, template *Template) *Parser {
+ p := &Parser{
+ name: name,
+ tokens: tokens,
+ template: template,
+ }
+ if len(tokens) > 0 {
+ p.lastToken = tokens[len(tokens)-1]
+ }
+ return p
+}
+
+// Consume one token. It will be gone forever.
+func (p *Parser) Consume() {
+ p.ConsumeN(1)
+}
+
+// Consume N tokens. They will be gone forever.
+func (p *Parser) ConsumeN(count int) {
+ p.idx += count
+}
+
+// Returns the current token.
+func (p *Parser) Current() *Token {
+ return p.Get(p.idx)
+}
+
+// Returns the CURRENT token if the given type matches.
+// Consumes this token on success.
+func (p *Parser) MatchType(typ TokenType) *Token {
+ if t := p.PeekType(typ); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND value matches.
+// Consumes this token on success.
+func (p *Parser) Match(typ TokenType, val string) *Token {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// Consumes this token on success.
+func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
+ for _, val := range vals {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekType(typ TokenType) *Token {
+ return p.PeekTypeN(0, typ)
+}
+
+// Returns the CURRENT token if the given type AND value matches.
+// It DOES NOT consume the token.
+func (p *Parser) Peek(typ TokenType, val string) *Token {
+ return p.PeekN(0, typ, val)
+}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
+ for _, v := range vals {
+ t := p.PeekN(0, typ, v)
+ if t != nil {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the
+// given type AND value matches for that token.
+// DOES NOT consume the token.
+func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil && t.Typ == typ && t.Val == val {
+ return t
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the given type matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil && t.Typ == typ {
+ return t
+ }
+ return nil
+}
+
+// Returns the UNCONSUMED token count.
+func (p *Parser) Remaining() int {
+ return len(p.tokens) - p.idx
+}
+
+// Returns the total token count.
+func (p *Parser) Count() int {
+ return len(p.tokens)
+}
+
+// Returns tokens[i] or NIL (if i >= len(tokens))
+func (p *Parser) Get(i int) *Token {
+ if i < len(p.tokens) {
+ return p.tokens[i]
+ }
+ return nil
+}
+
+// Returns tokens[current-position + shift] or NIL
+// (if (current-position + i) >= len(tokens))
+func (p *Parser) GetR(shift int) *Token {
+ i := p.idx + shift
+ return p.Get(i)
+}
+
+// Produces a nice error message and returns an error-object.
+// The 'token'-argument is optional. If provided, it will take
+// the token's position information. If not provided, it will
+// automatically use the CURRENT token's position information.
+func (p *Parser) Error(msg string, token *Token) *Error {
+ if token == nil {
+ // Set current token
+ token = p.Current()
+ if token == nil {
+ // Set to last token
+ if len(p.tokens) > 0 {
+ token = p.tokens[len(p.tokens)-1]
+ }
+ }
+ }
+ var line, col int
+ if token != nil {
+ line = token.Line
+ col = token.Col
+ }
+ return &Error{
+ Template: p.template,
+ Filename: p.name,
+ Sender: "parser",
+ Line: line,
+ Column: col,
+ Token: token,
+ ErrorMsg: msg,
+ }
+}
+
+// Wraps all nodes between starting tag and "{% endtag %}" and provides
+// one simple interface to execute the wrapped nodes.
+// It returns a parser to process provided arguments to the tag.
+func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
+ wrapper := &NodeWrapper{}
+
+ var tagArgs []*Token
+
+ for p.Remaining() > 0 {
+ // New tag, check whether we have to stop wrapping here
+ if p.Peek(TokenSymbol, "{%") != nil {
+ tagIdent := p.PeekTypeN(1, TokenIdentifier)
+
+ if tagIdent != nil {
+ // We've found a (!) end-tag
+
+ found := false
+ for _, n := range names {
+ if tagIdent.Val == n {
+ found = true
+ break
+ }
+ }
+
+ // We only process the tag if we've found an end tag
+ if found {
+ // Okay, endtag found.
+ p.ConsumeN(2) // '{%' tagname
+
+ for {
+ if p.Match(TokenSymbol, "%}") != nil {
+ // Okay, end the wrapping here
+ wrapper.Endtag = tagIdent.Val
+ return wrapper, newParser(p.template.name, tagArgs, p.template), nil
+ }
+ t := p.Current()
+ p.Consume()
+ if t == nil {
+ return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
+ }
+ tagArgs = append(tagArgs, t)
+ }
+ }
+ }
+
+ }
+
+ // Otherwise process next element to be wrapped
+ node, err := p.parseDocElement()
+ if err != nil {
+ return nil, nil, err
+ }
+ wrapper.nodes = append(wrapper.nodes, node)
+ }
+
+ return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
+ p.lastToken)
+}
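+
+// Illustrative usage sketch from inside a TagParser (cf. the tag
+// implementations vendored below; "endmytag" is a placeholder name):
+//
+//	wrapper, endArgs, err := doc.WrapUntilTag("endmytag")
+//	if err != nil {
+//		return nil, err
+//	}
+//	if endArgs.Count() > 0 {
+//		return nil, endArgs.Error("Arguments not allowed here.", nil)
+//	}
+//	node.bodyWrapper = wrapper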
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/parser_document.go b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_document.go
new file mode 100644
index 0000000..4ab8b93
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_document.go
@@ -0,0 +1,54 @@
+package pongo2
+
+// Doc = { ( Filter | Tag | HTML ) }
+func (p *Parser) parseDocElement() (INode, *Error) {
+ t := p.Current()
+
+ switch t.Typ {
+ case TokenHTML:
+ p.Consume() // consume HTML element
+ return &nodeHTML{token: t}, nil
+ case TokenSymbol:
+ switch t.Val {
+ case "{{":
+ // parse variable
+ variable, err := p.parseVariableElement()
+ if err != nil {
+ return nil, err
+ }
+ return variable, nil
+ case "{%":
+ // parse tag
+ tag, err := p.parseTagElement()
+ if err != nil {
+ return nil, err
+ }
+ return tag, nil
+ }
+ }
+ return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
+}
+
+func (tpl *Template) parse() *Error {
+ tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
+ doc, err := tpl.parser.parseDocument()
+ if err != nil {
+ return err
+ }
+ tpl.root = doc
+ return nil
+}
+
+func (p *Parser) parseDocument() (*nodeDocument, *Error) {
+ doc := &nodeDocument{}
+
+ for p.Remaining() > 0 {
+ node, err := p.parseDocElement()
+ if err != nil {
+ return nil, err
+ }
+ doc.Nodes = append(doc.Nodes, node)
+ }
+
+ return doc, nil
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/parser_expression.go b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_expression.go
new file mode 100644
index 0000000..98c6580
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/parser_expression.go
@@ -0,0 +1,491 @@
+package pongo2
+
+import (
+ "fmt"
+ "math"
+)
+
+type Expression struct {
+ // TODO: Add location token?
+ expr1 IEvaluator
+ expr2 IEvaluator
+ opToken *Token
+}
+
+type relationalExpression struct {
+ // TODO: Add location token?
+ expr1 IEvaluator
+ expr2 IEvaluator
+ opToken *Token
+}
+
+type simpleExpression struct {
+ negate bool
+ negativeSign bool
+ term1 IEvaluator
+ term2 IEvaluator
+ opToken *Token
+}
+
+type term struct {
+ // TODO: Add location token?
+ factor1 IEvaluator
+ factor2 IEvaluator
+ opToken *Token
+}
+
+type power struct {
+ // TODO: Add location token?
+ power1 IEvaluator
+ power2 IEvaluator
+}
+
+func (expr *Expression) FilterApplied(name string) bool {
+ return expr.expr1.FilterApplied(name) &&
+ (expr.expr2 == nil || expr.expr2.FilterApplied(name))
+}
+
+func (expr *relationalExpression) FilterApplied(name string) bool {
+ return expr.expr1.FilterApplied(name) &&
+ (expr.expr2 == nil || expr.expr2.FilterApplied(name))
+}
+
+func (expr *simpleExpression) FilterApplied(name string) bool {
+ return expr.term1.FilterApplied(name) &&
+ (expr.term2 == nil || expr.term2.FilterApplied(name))
+}
+
+func (expr *term) FilterApplied(name string) bool {
+ return expr.factor1.FilterApplied(name) &&
+ (expr.factor2 == nil || expr.factor2.FilterApplied(name))
+}
+
+func (expr *power) FilterApplied(name string) bool {
+ return expr.power1.FilterApplied(name) &&
+ (expr.power2 == nil || expr.power2.FilterApplied(name))
+}
+
+func (expr *Expression) GetPositionToken() *Token {
+ return expr.expr1.GetPositionToken()
+}
+
+func (expr *relationalExpression) GetPositionToken() *Token {
+ return expr.expr1.GetPositionToken()
+}
+
+func (expr *simpleExpression) GetPositionToken() *Token {
+ return expr.term1.GetPositionToken()
+}
+
+func (expr *term) GetPositionToken() *Token {
+ return expr.factor1.GetPositionToken()
+}
+
+func (expr *power) GetPositionToken() *Token {
+ return expr.power1.GetPositionToken()
+}
+
+func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ v1, err := expr.expr1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.expr2 != nil {
+ v2, err := expr.expr2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.opToken.Val {
+ case "and", "&&":
+ return AsValue(v1.IsTrue() && v2.IsTrue()), nil
+ case "or", "||":
+ return AsValue(v1.IsTrue() || v2.IsTrue()), nil
+ default:
+ panic(fmt.Sprintf("unimplemented: %s", expr.opToken.Val))
+ }
+ } else {
+ return v1, nil
+ }
+}
+
+func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ v1, err := expr.expr1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.expr2 != nil {
+ v2, err := expr.expr2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.opToken.Val {
+ case "<=":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() <= v2.Float()), nil
+ }
+ return AsValue(v1.Integer() <= v2.Integer()), nil
+ case ">=":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() >= v2.Float()), nil
+ }
+ return AsValue(v1.Integer() >= v2.Integer()), nil
+ case "==":
+ return AsValue(v1.EqualValueTo(v2)), nil
+ case ">":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() > v2.Float()), nil
+ }
+ return AsValue(v1.Integer() > v2.Integer()), nil
+ case "<":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() < v2.Float()), nil
+ }
+ return AsValue(v1.Integer() < v2.Integer()), nil
+ case "!=", "<>":
+ return AsValue(!v1.EqualValueTo(v2)), nil
+ case "in":
+ return AsValue(v2.Contains(v1)), nil
+ default:
+ panic(fmt.Sprintf("unimplemented: %s", expr.opToken.Val))
+ }
+ } else {
+ return v1, nil
+ }
+}
+
+func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ t1, err := expr.term1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result := t1
+
+ if expr.negate {
+ result = result.Negate()
+ }
+
+ if expr.negativeSign {
+ if result.IsNumber() {
+ switch {
+ case result.IsFloat():
+ result = AsValue(-1 * result.Float())
+ case result.IsInteger():
+ result = AsValue(-1 * result.Integer())
+ default:
+ panic("not possible")
+ }
+ } else {
+ return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
+ }
+ }
+
+ if expr.term2 != nil {
+ t2, err := expr.term2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.opToken.Val {
+ case "+":
+ if result.IsFloat() || t2.IsFloat() {
+ // Result will be a float
+ return AsValue(result.Float() + t2.Float()), nil
+ }
+ // Result will be an integer
+ return AsValue(result.Integer() + t2.Integer()), nil
+ case "-":
+ if result.IsFloat() || t2.IsFloat() {
+ // Result will be a float
+ return AsValue(result.Float() - t2.Float()), nil
+ }
+ // Result will be an integer
+ return AsValue(result.Integer() - t2.Integer()), nil
+ default:
+ panic("unimplemented")
+ }
+ }
+
+ return result, nil
+}
+
+func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ f1, err := expr.factor1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.factor2 != nil {
+ f2, err := expr.factor2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.opToken.Val {
+ case "*":
+ if f1.IsFloat() || f2.IsFloat() {
+ // Result will be float
+ return AsValue(f1.Float() * f2.Float()), nil
+ }
+ // Result will be int
+ return AsValue(f1.Integer() * f2.Integer()), nil
+ case "/":
+ if f1.IsFloat() || f2.IsFloat() {
+ // Result will be float
+ return AsValue(f1.Float() / f2.Float()), nil
+ }
+ // Result will be int
+ return AsValue(f1.Integer() / f2.Integer()), nil
+ case "%":
+ // Result will be int
+ return AsValue(f1.Integer() % f2.Integer()), nil
+ default:
+ panic("unimplemented")
+ }
+ } else {
+ return f1, nil
+ }
+}
+
+func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ p1, err := expr.power1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.power2 != nil {
+ p2, err := expr.power2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return AsValue(math.Pow(p1.Float(), p2.Float())), nil
+ }
+ return p1, nil
+}
+
+func (p *Parser) parseFactor() (IEvaluator, *Error) {
+ if p.Match(TokenSymbol, "(") != nil {
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ if p.Match(TokenSymbol, ")") == nil {
+ return nil, p.Error("Closing bracket expected after expression", nil)
+ }
+ return expr, nil
+ }
+
+ return p.parseVariableOrLiteralWithFilter()
+}
+
+func (p *Parser) parsePower() (IEvaluator, *Error) {
+ pw := new(power)
+
+ power1, err := p.parseFactor()
+ if err != nil {
+ return nil, err
+ }
+ pw.power1 = power1
+
+ if p.Match(TokenSymbol, "^") != nil {
+ power2, err := p.parsePower()
+ if err != nil {
+ return nil, err
+ }
+ pw.power2 = power2
+ }
+
+ if pw.power2 == nil {
+ // Shortcut for faster evaluation
+ return pw.power1, nil
+ }
+
+ return pw, nil
+}
+
+func (p *Parser) parseTerm() (IEvaluator, *Error) {
+ returnTerm := new(term)
+
+ factor1, err := p.parsePower()
+ if err != nil {
+ return nil, err
+ }
+ returnTerm.factor1 = factor1
+
+ for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
+ if returnTerm.opToken != nil {
+ // Create new sub-term
+ returnTerm = &term{
+ factor1: returnTerm,
+ }
+ }
+
+ op := p.Current()
+ p.Consume()
+
+ factor2, err := p.parsePower()
+ if err != nil {
+ return nil, err
+ }
+
+ returnTerm.opToken = op
+ returnTerm.factor2 = factor2
+ }
+
+ if returnTerm.opToken == nil {
+ // Shortcut for faster evaluation
+ return returnTerm.factor1, nil
+ }
+
+ return returnTerm, nil
+}
+
+func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
+ expr := new(simpleExpression)
+
+ if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
+ if sign.Val == "-" {
+ expr.negativeSign = true
+ }
+ }
+
+ if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
+ expr.negate = true
+ }
+
+ term1, err := p.parseTerm()
+ if err != nil {
+ return nil, err
+ }
+ expr.term1 = term1
+
+ for p.PeekOne(TokenSymbol, "+", "-") != nil {
+ if expr.opToken != nil {
+ // New sub expr
+ expr = &simpleExpression{
+ term1: expr,
+ }
+ }
+
+ op := p.Current()
+ p.Consume()
+
+ term2, err := p.parseTerm()
+ if err != nil {
+ return nil, err
+ }
+
+ expr.term2 = term2
+ expr.opToken = op
+ }
+
+ if !expr.negate && !expr.negativeSign && expr.term2 == nil {
+ // Shortcut for faster evaluation
+ return expr.term1, nil
+ }
+
+ return expr, nil
+}
+
+func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
+ expr1, err := p.parseSimpleExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ expr := &relationalExpression{
+ expr1: expr1,
+ }
+
+ if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
+ expr2, err := p.parseRelationalExpression()
+ if err != nil {
+ return nil, err
+ }
+ expr.opToken = t
+ expr.expr2 = expr2
+ } else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
+ expr2, err := p.parseSimpleExpression()
+ if err != nil {
+ return nil, err
+ }
+ expr.opToken = t
+ expr.expr2 = expr2
+ }
+
+ if expr.expr2 == nil {
+ // Shortcut for faster evaluation
+ return expr.expr1, nil
+ }
+
+ return expr, nil
+}
+
+func (p *Parser) ParseExpression() (IEvaluator, *Error) {
+ rexpr1, err := p.parseRelationalExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ exp := &Expression{
+ expr1: rexpr1,
+ }
+
+ if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
+ op := p.Current()
+ p.Consume()
+ expr2, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ exp.expr2 = expr2
+ exp.opToken = op
+ }
+
+ if exp.expr2 == nil {
+ // Shortcut for faster evaluation
+ return exp.expr1, nil
+ }
+
+ return exp, nil
+}
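+
+// For reference, the grammar implemented by the parse* functions above,
+// roughly from lowest to highest precedence (a reconstruction from the
+// code, not upstream documentation):
+//
+//	Expression = Relational [ ("and"|"&&"|"or"|"||") Expression ]
+//	Relational = Simple [ ("=="|"<="|">="|"!="|"<>"|">"|"<") Relational | "in" Simple ]
+//	Simple     = ["+"|"-"] ["!"|"not"] Term { ("+"|"-") Term }
+//	Term       = Power { ("*"|"/"|"%") Power }
+//	Power      = Factor [ "^" Power ]
+//	Factor     = "(" Expression ")" | variable-or-literal-with-filters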
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2.go b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2.go
new file mode 100644
index 0000000..eda3aa0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/pongo2.go
@@ -0,0 +1,14 @@
+package pongo2
+
+// Version string
+const Version = "dev"
+
+// Must panics if a Template couldn't be parsed successfully. This is how you
+// would use it:
+// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
+func Must(tpl *Template, err error) *Template {
+ if err != nil {
+ panic(err)
+ }
+ return tpl
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags.go
new file mode 100644
index 0000000..5168d17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags.go
@@ -0,0 +1,132 @@
+package pongo2
+
+/* Incomplete:
+ -----------
+
+ verbatim (only the "name" argument is missing for verbatim)
+
+ Reconsideration:
+ ----------------
+
+ debug (reason: not sure what to output yet)
+ regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
+
+ The following built-in tags won't be added:
+ --------------------------------------
+
+ csrf_token (reason: web-framework specific)
+ load (reason: python-specific)
+ url (reason: web-framework specific)
+*/
+
+import (
+ "fmt"
+)
+
+type INodeTag interface {
+ INode
+}
+
+// This is the function signature of the tag's parser you will have
+// to implement in order to create a new tag.
+//
+// 'doc' provides access to the whole document, while 'arguments'
+// provides access to the user's arguments to the tag:
+//
+// {% your_tag_name some "arguments" 123 %}
+//
+// 'start' will be the *Token with the tag's name in it (here: your_tag_name).
+//
+// Please see the Parser documentation on how to use the parser.
+// See RegisterTag()'s documentation for more information about
+// writing a tag as well.
+type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
+
+type tag struct {
+ name string
+ parser TagParser
+}
+
+var tags map[string]*tag
+
+func init() {
+ tags = make(map[string]*tag)
+}
+
+// Registers a new tag. If there's already a tag with the same
+// name, RegisterTag will panic. You usually want to call this
+// function in the tag's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterTag(name string, parserFn TagParser) {
+ _, existing := tags[name]
+ if existing {
+ panic(fmt.Sprintf("Tag with name '%s' is already registered.", name))
+ }
+ tags[name] = &tag{
+ name: name,
+ parser: parserFn,
+ }
+}
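+
+// Minimal sketch of a custom tag using the types above (the tag name
+// "now_year" and its node type are made up for illustration; assumes
+// "strconv" and "time" are imported):
+//
+//	type tagNowYearNode struct{}
+//
+//	func (n *tagNowYearNode) Execute(ctx *ExecutionContext, w TemplateWriter) *Error {
+//		w.WriteString(strconv.Itoa(time.Now().Year()))
+//		return nil
+//	}
+//
+//	func init() {
+//		RegisterTag("now_year", func(doc *Parser, start *Token, args *Parser) (INodeTag, *Error) {
+//			if args.Remaining() > 0 {
+//				return nil, args.Error("now_year takes no arguments.", nil)
+//			}
+//			return &tagNowYearNode{}, nil
+//		})
+//	}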
+
+// Replaces an already registered tag with a new implementation. Use this
+// function with caution since it allows you to change existing tag behaviour.
+func ReplaceTag(name string, parserFn TagParser) {
+ _, existing := tags[name]
+ if !existing {
+ panic(fmt.Sprintf("Tag with name '%s' does not exist (therefore cannot be overridden).", name))
+ }
+ tags[name] = &tag{
+ name: name,
+ parser: parserFn,
+ }
+}
+
+// Tag = "{%" IDENT ARGS "%}"
+func (p *Parser) parseTagElement() (INodeTag, *Error) {
+ p.Consume() // consume "{%"
+ tokenName := p.MatchType(TokenIdentifier)
+
+ // Check for identifier
+ if tokenName == nil {
+ return nil, p.Error("Tag name must be an identifier.", nil)
+ }
+
+ // Check for the existing tag
+ tag, exists := tags[tokenName.Val]
+ if !exists {
+ // Does not exist
+ return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
+ }
+
+ // Check sandbox tag restriction
+ if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
+ return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
+ }
+
+ var argsToken []*Token
+ for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
+ // Add token to args
+ argsToken = append(argsToken, p.Current())
+ p.Consume() // next token
+ }
+
+ // EOF?
+ if p.Remaining() == 0 {
+ return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
+ }
+
+ p.Match(TokenSymbol, "%}")
+
+ argParser := newParser(p.name, argsToken, p.template)
+ if len(argsToken) == 0 {
+ // This is done to have nice EOF error messages
+ argParser.lastToken = tokenName
+ }
+
+ p.template.level++
+ defer func() { p.template.level-- }()
+ return tag.parser(p, tokenName, argParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_autoescape.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_autoescape.go
new file mode 100644
index 0000000..590a1db
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_autoescape.go
@@ -0,0 +1,52 @@
+package pongo2
+
+type tagAutoescapeNode struct {
+ wrapper *NodeWrapper
+ autoescape bool
+}
+
+func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ old := ctx.Autoescape
+ ctx.Autoescape = node.autoescape
+
+ err := node.wrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+
+ ctx.Autoescape = old
+
+ return nil
+}
+
+func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ autoescapeNode := &tagAutoescapeNode{}
+
+ wrapper, _, err := doc.WrapUntilTag("endautoescape")
+ if err != nil {
+ return nil, err
+ }
+ autoescapeNode.wrapper = wrapper
+
+ modeToken := arguments.MatchType(TokenIdentifier)
+ if modeToken == nil {
+ return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
+ }
+ if modeToken.Val == "on" {
+ autoescapeNode.autoescape = true
+ } else if modeToken.Val == "off" {
+ autoescapeNode.autoescape = false
+ } else {
+ return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
+ }
+
+ return autoescapeNode, nil
+}
+
+func init() {
+ RegisterTag("autoescape", tagAutoescapeParser)
+}
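+
+// Template usage sketch (syntax as implemented above; raw_html is a
+// placeholder context variable):
+//
+//	{% autoescape off %}{{ raw_html }}{% endautoescape %}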
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_block.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_block.go
new file mode 100644
index 0000000..b558930
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_block.go
@@ -0,0 +1,93 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+type tagBlockNode struct {
+ name string
+}
+
+func (node *tagBlockNode) getBlockWrapperByName(tpl *Template) *NodeWrapper {
+ var t *NodeWrapper
+ if tpl.child != nil {
+ // First ask the child for the block
+ t = node.getBlockWrapperByName(tpl.child)
+ }
+ if t == nil {
+ // Child has no block, lets look up here at parent
+ t = tpl.blocks[node.name]
+ }
+ return t
+}
+
+func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ tpl := ctx.template
+ if tpl == nil {
+ panic("internal error: tpl == nil")
+ }
+ // Determine the block to execute
+ blockWrapper := node.getBlockWrapperByName(tpl)
+ if blockWrapper == nil {
+ // fmt.Printf("could not find: %s\n", node.name)
+ return ctx.Error("internal error: block_wrapper == nil in tagBlockNode.Execute()", nil)
+ }
+ err := blockWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+
+ // TODO: Add support for {{ block.super }}
+
+ return nil
+}
+
+func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ if arguments.Count() == 0 {
+ return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
+ }
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
+ }
+
+ if arguments.Remaining() != 0 {
+ return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
+ }
+
+ wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
+ if err != nil {
+ return nil, err
+ }
+ if endtagargs.Remaining() > 0 {
+ endtagnameToken := endtagargs.MatchType(TokenIdentifier)
+ if endtagnameToken != nil {
+ if endtagnameToken.Val != nameToken.Val {
+ return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal the 'block'-tag's name ('%s' != '%s').",
+ nameToken.Val, endtagnameToken.Val), nil)
+ }
+ }
+
+ if endtagnameToken == nil || endtagargs.Remaining() > 0 {
+ return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
+ }
+ }
+
+ tpl := doc.template
+ if tpl == nil {
+ panic("internal error: tpl == nil")
+ }
+ _, hasBlock := tpl.blocks[nameToken.Val]
+ if !hasBlock {
+ tpl.blocks[nameToken.Val] = wrapper
+ } else {
+ return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
+ }
+
+ return &tagBlockNode{name: nameToken.Val}, nil
+}
+
+func init() {
+ RegisterTag("block", tagBlockParser)
+}
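+
+// Template usage sketch (the block name "title" is a placeholder; the
+// trailing identifier on endblock is optional and must match if given):
+//
+//	{% block title %}Default title{% endblock title %}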
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_comment.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_comment.go
new file mode 100644
index 0000000..a66a973
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_comment.go
@@ -0,0 +1,27 @@
+package pongo2
+
+type tagCommentNode struct{}
+
+func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ return nil
+}
+
+func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ commentNode := &tagCommentNode{}
+
+ // TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
+ _, _, err := doc.WrapUntilTag("endcomment")
+ if err != nil {
+ return nil, err
+ }
+
+ if arguments.Count() != 0 {
+ return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
+ }
+
+ return commentNode, nil
+}
+
+func init() {
+ RegisterTag("comment", tagCommentParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_cycle.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_cycle.go
new file mode 100644
index 0000000..9b83b9b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_cycle.go
@@ -0,0 +1,106 @@
+package pongo2
+
+type tagCycleValue struct {
+ node *tagCycleNode
+ value *Value
+}
+
+type tagCycleNode struct {
+ position *Token
+ args []IEvaluator
+ idx int
+ asName string
+ silent bool
+}
+
+func (cv *tagCycleValue) String() string {
+ return cv.value.String()
+}
+
+func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ item := node.args[node.idx%len(node.args)]
+ node.idx++
+
+ val, err := item.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if t, ok := val.Interface().(*tagCycleValue); ok {
+ // {% cycle "test1" "test2"
+ // {% cycle cycleitem %}
+
+ // Update the cycle value with next value
+ item := t.node.args[t.node.idx%len(t.node.args)]
+ t.node.idx++
+
+ val, err := item.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ t.value = val
+
+ if !t.node.silent {
+ writer.WriteString(val.String())
+ }
+ } else {
+ // Regular call
+
+ cycleValue := &tagCycleValue{
+ node: node,
+ value: val,
+ }
+
+ if node.asName != "" {
+ ctx.Private[node.asName] = cycleValue
+ }
+ if !node.silent {
+ writer.WriteString(val.String())
+ }
+ }
+
+ return nil
+}
+
+// HINT: We're not supporting the old comma-separated list of expressions argument style
+func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ cycleNode := &tagCycleNode{
+ position: start,
+ }
+
+ for arguments.Remaining() > 0 {
+ node, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ cycleNode.args = append(cycleNode.args, node)
+
+ if arguments.MatchOne(TokenKeyword, "as") != nil {
+ // as
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
+ }
+ cycleNode.asName = nameToken.Val
+
+ if arguments.MatchOne(TokenIdentifier, "silent") != nil {
+ cycleNode.silent = true
+ }
+
+ // Now we're finished
+ break
+ }
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed cycle-tag.", nil)
+ }
+
+ return cycleNode, nil
+}
+
+func init() {
+ RegisterTag("cycle", tagCycleParser)
+}
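+
+// Template usage sketches (class names and the alias "rowclass" are
+// placeholders):
+//
+//	<tr class="{% cycle "odd" "even" %}">
+//	{% cycle "odd" "even" as rowclass silent %} ... {{ rowclass }}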
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_extends.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_extends.go
new file mode 100644
index 0000000..5771020
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_extends.go
@@ -0,0 +1,52 @@
+package pongo2
+
+type tagExtendsNode struct {
+ filename string
+}
+
+func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ return nil
+}
+
+func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ extendsNode := &tagExtendsNode{}
+
+ if doc.template.level > 1 {
+ return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
+ }
+
+ if doc.template.parent != nil {
+ // Already one parent
+ return nil, arguments.Error("This template has already one parent.", start)
+ }
+
+ if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
+ // prepared, static template
+
+ // Get parent's filename
+ parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+
+ // Parse the parent
+ parentTemplate, err := doc.template.set.FromFile(parentFilename)
+ if err != nil {
+ return nil, err.(*Error)
+ }
+
+ // Keep track of things
+ parentTemplate.child = doc.template
+ doc.template.parent = parentTemplate
+ extendsNode.filename = parentFilename
+ } else {
+ return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
+ }
+
+ return extendsNode, nil
+}
+
+func init() {
+ RegisterTag("extends", tagExtendsParser)
+}
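+
+// Template usage sketch ("base.html" is a placeholder filename; the tag
+// must appear at root level, and a template can have only one parent):
+//
+//	{% extends "base.html" %}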
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_filter.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_filter.go
new file mode 100644
index 0000000..b38fd92
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_filter.go
@@ -0,0 +1,95 @@
+package pongo2
+
+import (
+ "bytes"
+)
+
+type nodeFilterCall struct {
+ name string
+ paramExpr IEvaluator
+}
+
+type tagFilterNode struct {
+ position *Token
+ bodyWrapper *NodeWrapper
+ filterChain []*nodeFilterCall
+}
+
+func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
+
+ err := node.bodyWrapper.Execute(ctx, temp)
+ if err != nil {
+ return err
+ }
+
+ value := AsValue(temp.String())
+
+ for _, call := range node.filterChain {
+ var param *Value
+ if call.paramExpr != nil {
+ param, err = call.paramExpr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ } else {
+ param = AsValue(nil)
+ }
+ value, err = ApplyFilter(call.name, value, param)
+ if err != nil {
+ return ctx.Error(err.Error(), node.position)
+ }
+ }
+
+ writer.WriteString(value.String())
+
+ return nil
+}
+
+func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ filterNode := &tagFilterNode{
+ position: start,
+ }
+
+ wrapper, _, err := doc.WrapUntilTag("endfilter")
+ if err != nil {
+ return nil, err
+ }
+ filterNode.bodyWrapper = wrapper
+
+ for arguments.Remaining() > 0 {
+ filterCall := &nodeFilterCall{}
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Expected a filter name (identifier).", nil)
+ }
+ filterCall.name = nameToken.Val
+
+ if arguments.MatchOne(TokenSymbol, ":") != nil {
+ // Filter parameter
+ // NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
+ expr, err := arguments.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ filterCall.paramExpr = expr
+ }
+
+ filterNode.filterChain = append(filterNode.filterChain, filterCall)
+
+ if arguments.MatchOne(TokenSymbol, "|") == nil {
+ break
+ }
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed filter-tag arguments.", nil)
+ }
+
+ return filterNode, nil
+}
+
+func init() {
+ RegisterTag("filter", tagFilterParser)
+}
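+
+// Template usage sketch, assuming the built-in "lower" filter is
+// registered in this template set:
+//
+//	{% filter lower %}THIS WILL BE LOWERCASED{% endfilter %}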
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_firstof.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_firstof.go
new file mode 100644
index 0000000..5b2888e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_firstof.go
@@ -0,0 +1,49 @@
+package pongo2
+
+type tagFirstofNode struct {
+ position *Token
+ args []IEvaluator
+}
+
+func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, arg := range node.args {
+ val, err := arg.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if val.IsTrue() {
+ if ctx.Autoescape && !arg.FilterApplied("safe") {
+ val, err = ApplyFilter("escape", val, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ writer.WriteString(val.String())
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ firstofNode := &tagFirstofNode{
+ position: start,
+ }
+
+ for arguments.Remaining() > 0 {
+ node, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ firstofNode.args = append(firstofNode.args, node)
+ }
+
+ return firstofNode, nil
+}
+
+func init() {
+ RegisterTag("firstof", tagFirstofParser)
+}
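+
+// Template usage sketch (var1/var2 are placeholder variables; the first
+// truthy argument is printed):
+//
+//	{% firstof var1 var2 "fallback" %}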
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_for.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_for.go
new file mode 100644
index 0000000..5b0b555
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_for.go
@@ -0,0 +1,159 @@
+package pongo2
+
+type tagForNode struct {
+ key string
+ value string // only for maps: for key, value in map
+ objectEvaluator IEvaluator
+ reversed bool
+ sorted bool
+
+ bodyWrapper *NodeWrapper
+ emptyWrapper *NodeWrapper
+}
+
+type tagForLoopInformation struct {
+ Counter int
+ Counter0 int
+ Revcounter int
+ Revcounter0 int
+ First bool
+ Last bool
+ Parentloop *tagForLoopInformation
+}
+
+func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
+ // Backup forloop (as parentloop in public context), key-name and value-name
+ forCtx := NewChildExecutionContext(ctx)
+ parentloop := forCtx.Private["forloop"]
+
+ // Create loop struct
+ loopInfo := &tagForLoopInformation{
+ First: true,
+ }
+
+ // Is it a loop in a loop?
+ if parentloop != nil {
+ loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
+ }
+
+ // Register loopInfo in public context
+ forCtx.Private["forloop"] = loopInfo
+
+ obj, err := node.objectEvaluator.Evaluate(forCtx)
+ if err != nil {
+ return err
+ }
+
+ obj.IterateOrder(func(idx, count int, key, value *Value) bool {
+ // There's something to iterate over (correct type and at least 1 item)
+
+ // Update loop infos and public context
+ forCtx.Private[node.key] = key
+ if value != nil {
+ forCtx.Private[node.value] = value
+ }
+ loopInfo.Counter = idx + 1
+ loopInfo.Counter0 = idx
+ if idx == 1 {
+ loopInfo.First = false
+ }
+ if idx+1 == count {
+ loopInfo.Last = true
+ }
+ loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up
+ loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up
+
+ // Render elements with updated context
+ err := node.bodyWrapper.Execute(forCtx, writer)
+ if err != nil {
+ forError = err
+ return false
+ }
+ return true
+ }, func() {
+ // Nothing to iterate over (maybe wrong type or no items)
+ if node.emptyWrapper != nil {
+ err := node.emptyWrapper.Execute(forCtx, writer)
+ if err != nil {
+ forError = err
+ }
+ }
+ }, node.reversed, node.sorted)
+
+ return forError
+}
+
+func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ forNode := &tagForNode{}
+
+ // Arguments parsing
+ var valueToken *Token
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
+ }
+
+ if arguments.Match(TokenSymbol, ",") != nil {
+ // Value name is provided
+ valueToken = arguments.MatchType(TokenIdentifier)
+ if valueToken == nil {
+ return nil, arguments.Error("Value name must be an identifier.", nil)
+ }
+ }
+
+ if arguments.Match(TokenKeyword, "in") == nil {
+ return nil, arguments.Error("Expected keyword 'in'.", nil)
+ }
+
+ objectEvaluator, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ forNode.objectEvaluator = objectEvaluator
+ forNode.key = keyToken.Val
+ if valueToken != nil {
+ forNode.value = valueToken.Val
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
+ forNode.reversed = true
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
+ forNode.sorted = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed for-loop arguments.", nil)
+ }
+
+ // Body wrapping
+ wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
+ if err != nil {
+ return nil, err
+ }
+ forNode.bodyWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "empty" {
+ // if there's an 'empty' block in the for-loop, we need to wrap that block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endfor")
+ if err != nil {
+ return nil, err
+ }
+ forNode.emptyWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return forNode, nil
+}
+
+func init() {
+ RegisterTag("for", tagForParser)
+}
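+
+// Template usage sketch ("comments"/"comment" are placeholders; the
+// optional 'empty' block and the forloop variable work as parsed above):
+//
+//	{% for comment in comments sorted %}
+//	    {{ forloop.Counter }}: {{ comment }}
+//	{% empty %}
+//	    No comments yet.
+//	{% endfor %}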
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_if.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_if.go
new file mode 100644
index 0000000..3eeaf3b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_if.go
@@ -0,0 +1,76 @@
+package pongo2
+
+type tagIfNode struct {
+ conditions []IEvaluator
+ wrappers []*NodeWrapper
+}
+
+func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for i, condition := range node.conditions {
+ result, err := condition.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if result.IsTrue() {
+ return node.wrappers[i].Execute(ctx, writer)
+ }
+ // Last condition?
+ if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
+ return node.wrappers[i+1].Execute(ctx, writer)
+ }
+ }
+ return nil
+}
+
+func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifNode := &tagIfNode{}
+
+ // Parse first and main IF condition
+ condition, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifNode.conditions = append(ifNode.conditions, condition)
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("If-condition is malformed.", nil)
+ }
+
+ // Check the rest
+ for {
+ wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
+ if err != nil {
+ return nil, err
+ }
+ ifNode.wrappers = append(ifNode.wrappers, wrapper)
+
+ if wrapper.Endtag == "elif" {
+ // elif can take a condition
+ condition, err = tagArgs.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifNode.conditions = append(ifNode.conditions, condition)
+
+ if tagArgs.Remaining() > 0 {
+ return nil, tagArgs.Error("Elif-condition is malformed.", nil)
+ }
+ } else {
+ if tagArgs.Count() > 0 {
+ // else/endif can't take any conditions
+ return nil, tagArgs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ if wrapper.Endtag == "endif" {
+ break
+ }
+ }
+
+ return ifNode, nil
+}
+
+func init() {
+ RegisterTag("if", tagIfParser)
+}
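+
+// Template usage sketch ("score" is a placeholder variable):
+//
+//	{% if score >= 90 %}A{% elif score >= 80 %}B{% else %}C{% endif %}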
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifchanged.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifchanged.go
new file mode 100644
index 0000000..45296a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifchanged.go
@@ -0,0 +1,116 @@
+package pongo2
+
+import (
+ "bytes"
+)
+
+type tagIfchangedNode struct {
+ watchedExpr []IEvaluator
+ lastValues []*Value
+ lastContent []byte
+ thenWrapper *NodeWrapper
+ elseWrapper *NodeWrapper
+}
+
+func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ if len(node.watchedExpr) == 0 {
+ // Check against own rendered body
+
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+ err := node.thenWrapper.Execute(ctx, buf)
+ if err != nil {
+ return err
+ }
+
+ bufBytes := buf.Bytes()
+ if !bytes.Equal(node.lastContent, bufBytes) {
+ // Rendered content changed, output it
+ writer.Write(bufBytes)
+ node.lastContent = bufBytes
+ }
+ } else {
+ nowValues := make([]*Value, 0, len(node.watchedExpr))
+ for _, expr := range node.watchedExpr {
+ val, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ nowValues = append(nowValues, val)
+ }
+
+ // Compare old to new values now
+ changed := len(node.lastValues) == 0
+
+ for idx, oldVal := range node.lastValues {
+ if !oldVal.EqualValueTo(nowValues[idx]) {
+ changed = true
+ break // we can stop here because ONE value changed
+ }
+ }
+
+ node.lastValues = nowValues
+
+ if changed {
+ // Render thenWrapper
+ err := node.thenWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ } else if node.elseWrapper != nil {
+ // Render elseWrapper; the nil-check guards against an
+ // 'ifchanged' without an else-block, which would otherwise
+ // dereference a nil *NodeWrapper
+ err := node.elseWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifchangedNode := &tagIfchangedNode{}
+
+ for arguments.Remaining() > 0 {
+ // Parse condition
+ expr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else-block in the ifchanged-statement, we need it as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifchangedNode, nil
+}
+
+func init() {
+ RegisterTag("ifchanged", tagIfchangedParser)
+}
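+
+// Template usage sketch ("entries"/"entry.Date" are placeholders); with
+// watched expressions, the body renders only when a value differs from
+// the previous iteration:
+//
+//	{% for entry in entries %}
+//	    {% ifchanged entry.Date %}{{ entry.Date }}{% endifchanged %}
+//	{% endfor %}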
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifequal.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifequal.go
new file mode 100644
index 0000000..103f1c7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifequal.go
@@ -0,0 +1,78 @@
+package pongo2
+
+type tagIfEqualNode struct {
+ var1, var2 IEvaluator
+ thenWrapper *NodeWrapper
+ elseWrapper *NodeWrapper
+}
+
+func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ r1, err := node.var1.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ r2, err := node.var2.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ result := r1.EqualValueTo(r2)
+
+ if result {
+ return node.thenWrapper.Execute(ctx, writer)
+ }
+ if node.elseWrapper != nil {
+ return node.elseWrapper.Execute(ctx, writer)
+ }
+ return nil
+}
+
+func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifequalNode := &tagIfEqualNode{}
+
+ // Parse two expressions
+ var1, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ var2, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.var1 = var1
+ ifequalNode.var2 = var2
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else-block in the ifequal-statement, we need it as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifequal")
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifequalNode, nil
+}
+
+func init() {
+ RegisterTag("ifequal", tagIfEqualParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifnotequal.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifnotequal.go
new file mode 100644
index 0000000..0d287d3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ifnotequal.go
@@ -0,0 +1,78 @@
+package pongo2
+
+type tagIfNotEqualNode struct {
+ var1, var2 IEvaluator
+ thenWrapper *NodeWrapper
+ elseWrapper *NodeWrapper
+}
+
+func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ r1, err := node.var1.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ r2, err := node.var2.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ result := !r1.EqualValueTo(r2)
+
+ if result {
+ return node.thenWrapper.Execute(ctx, writer)
+ }
+ if node.elseWrapper != nil {
+ return node.elseWrapper.Execute(ctx, writer)
+ }
+ return nil
+}
+
+func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifnotequalNode := &tagIfNotEqualNode{}
+
+ // Parse two expressions
+ var1, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ var2, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.var1 = var1
+ ifnotequalNode.var2 = var2
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else-block in the ifnotequal-statement, we need it as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifnotequalNode, nil
+}
+
+func init() {
+ RegisterTag("ifnotequal", tagIfNotEqualParser)
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_import.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_import.go
new file mode 100644
index 0000000..7e0d6a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_import.go
@@ -0,0 +1,84 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+type tagImportNode struct {
+ position *Token
+ filename string
+ macros map[string]*tagMacroNode // alias/name -> macro instance
+}
+
+func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for name, macro := range node.macros {
+ func(name string, macro *tagMacroNode) {
+ ctx.Private[name] = func(args ...*Value) *Value {
+ return macro.call(ctx, args...)
+ }
+ }(name, macro)
+ }
+ return nil
+}
+
+func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ importNode := &tagImportNode{
+ position: start,
+ macros: make(map[string]*tagMacroNode),
+ }
+
+ filenameToken := arguments.MatchType(TokenString)
+ if filenameToken == nil {
+ return nil, arguments.Error("Import-tag needs a filename as string.", nil)
+ }
+
+ importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+
+ if arguments.Remaining() == 0 {
+ return nil, arguments.Error("You must at least specify one macro to import.", nil)
+ }
+
+ // Compile the given template
+ tpl, err := doc.template.set.FromFile(importNode.filename)
+ if err != nil {
+ return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
+ }
+
+ for arguments.Remaining() > 0 {
+ macroNameToken := arguments.MatchType(TokenIdentifier)
+ if macroNameToken == nil {
+ return nil, arguments.Error("Expected macro name (identifier).", nil)
+ }
+
+ asName := macroNameToken.Val
+ if arguments.Match(TokenKeyword, "as") != nil {
+ aliasToken := arguments.MatchType(TokenIdentifier)
+ if aliasToken == nil {
+ return nil, arguments.Error("Expected macro alias name (identifier).", nil)
+ }
+ asName = aliasToken.Val
+ }
+
+ macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
+ if !has {
+ return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
+ importNode.filename), macroNameToken)
+ }
+
+ importNode.macros[asName] = macroInstance
+
+ if arguments.Remaining() == 0 {
+ break
+ }
+
+ if arguments.Match(TokenSymbol, ",") == nil {
+ return nil, arguments.Error("Expected ','.", nil)
+ }
+ }
+
+ return importNode, nil
+}
+
+func init() {
+ RegisterTag("import", tagImportParser)
+}
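+
+// Template usage sketch ("macros.html" and the macro names are
+// placeholders):
+//
+//	{% import "macros.html" form_field, label as field_label %}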
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_include.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_include.go
new file mode 100644
index 0000000..6d619fd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_include.go
@@ -0,0 +1,146 @@
+package pongo2
+
+type tagIncludeNode struct {
+ tpl *Template
+ filenameEvaluator IEvaluator
+ lazy bool
+ only bool
+ filename string
+ withPairs map[string]IEvaluator
+ ifExists bool
+}
+
+func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ // Building the context for the template
+ includeCtx := make(Context)
+
+ // Fill the context with all data from the parent
+ if !node.only {
+ includeCtx.Update(ctx.Public)
+ includeCtx.Update(ctx.Private)
+ }
+
+ // Put all custom with-pairs into the context
+ for key, value := range node.withPairs {
+ val, err := value.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ includeCtx[key] = val
+ }
+
+ // Execute the template
+ if node.lazy {
+ // Evaluate the filename
+ filename, err := node.filenameEvaluator.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if filename.String() == "" {
+ return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
+ }
+
+ // Get include-filename
+ includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())
+
+ includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
+ if err2 != nil {
+ // if this is a ReadFile error and the "if_exists" flag is enabled
+ if node.ifExists && err2.(*Error).Sender == "fromfile" {
+ return nil
+ }
+ return err2.(*Error)
+ }
+ err2 = includedTpl.ExecuteWriter(includeCtx, writer)
+ if err2 != nil {
+ return err2.(*Error)
+ }
+ return nil
+ }
+ // Template is already parsed with static filename
+ err := node.tpl.ExecuteWriter(includeCtx, writer)
+ if err != nil {
+ return err.(*Error)
+ }
+ return nil
+}
+
+type tagIncludeEmptyNode struct{}
+
+func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ return nil
+}
+
+func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ includeNode := &tagIncludeNode{
+ withPairs: make(map[string]IEvaluator),
+ }
+
+ if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
+ // prepared, static template
+
+ // "if_exists" flag
+ ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil
+
+ // Get include-filename
+ includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+
+ // Parse the parent
+ includeNode.filename = includedFilename
+ includedTpl, err := doc.template.set.FromFile(includedFilename)
+ if err != nil {
+ // if this is ReadFile error, and "if_exists" token presents we should create and empty node
+ if err.(*Error).Sender == "fromfile" && ifExists {
+ return &tagIncludeEmptyNode{}, nil
+ }
+ return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
+ }
+ includeNode.tpl = includedTpl
+ } else {
+ // No string given, so the user wants lazy evaluation (slower, but possible)
+ filenameEvaluator, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
+ }
+ includeNode.filenameEvaluator = filenameEvaluator
+ includeNode.lazy = true
+ includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
+ }
+
+ // After having parsed the filename, we parse the with/only options
+ if arguments.Match(TokenIdentifier, "with") != nil {
+ for arguments.Remaining() > 0 {
+ // We have at least one key=expr pair (because of starting "with")
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+ valueExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
+ }
+
+ includeNode.withPairs[keyToken.Val] = valueExpr
+
+ // Only?
+ if arguments.Match(TokenIdentifier, "only") != nil {
+ includeNode.only = true
+ break // stop parsing arguments because it's the last option
+ }
+ }
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
+ }
+
+ return includeNode, nil
+}
+
+func init() {
+ RegisterTag("include", tagIncludeParser)
+}
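
A sketch of the three parser features above (static filename, if_exists, and with/only). The file footer.html is hypothetical; because the parser returns an empty node for a missing if_exists include, the template still compiles and renders when the file is absent.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// if_exists turns a missing include into an empty node at parse time;
	// "with title=..." seeds the included template's context, and "only"
	// prevents the parent's variables from leaking into it.
	tpl, err := pongo2.FromString(
		`{% include "footer.html" if_exists with title="Home" only %}done`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"secret": 42})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // prints just "done" when footer.html is absent
}
```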
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_lorem.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_lorem.go
new file mode 100644
index 0000000..8a152b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_lorem.go
@@ -0,0 +1,131 @@
+package pongo2
+
+import (
+ "math/rand"
+ "strings"
+ "time"
+)
+
+var (
+ tagLoremParagraphs = strings.Split(tagLoremText, "\n")
+ tagLoremWords = strings.Fields(tagLoremText)
+)
+
+type tagLoremNode struct {
+ position *Token
+ count int // number of paragraphs
+ method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
+ random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
+}
+
+func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ switch node.method {
+ case "b":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ }
+ }
+ case "w":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[rand.Intn(len(tagLoremWords))]
+ writer.WriteString(word)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[i%len(tagLoremWords)]
+ writer.WriteString(word)
+ }
+ }
+ case "p":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("")
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ writer.WriteString("
")
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("")
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ writer.WriteString("
")
+
+ }
+ }
+ default:
+ panic("unsupported method")
+ }
+
+ return nil
+}
+
+func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ loremNode := &tagLoremNode{
+ position: start,
+ count: 1,
+ method: "b",
+ }
+
+ if countToken := arguments.MatchType(TokenNumber); countToken != nil {
+ loremNode.count = AsValue(countToken.Val).Integer()
+ }
+
+ if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
+ if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
+ return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
+ }
+
+ loremNode.method = methodToken.Val
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "random") != nil {
+ loremNode.random = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
+ }
+
+ return loremNode, nil
+}
+
+func init() {
+ rand.Seed(time.Now().Unix())
+
+ RegisterTag("lorem", tagLoremParser)
+}
+
+const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
+Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
+Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
+At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
+Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
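
A short runnable sketch of the tag implemented above: a count, a method ('w' for words here), and an optional random flag.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// 5 words of lorem ipsum; append "random" to sample words randomly,
	// or use method "p" for <p>-wrapped paragraphs ("b" is the default).
	tpl, err := pongo2.FromString(`{% lorem 5 w %}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Lorem ipsum dolor sit amet,
}
```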
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_macro.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_macro.go
new file mode 100644
index 0000000..18a2c3c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_macro.go
@@ -0,0 +1,149 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type tagMacroNode struct {
+ position *Token
+ name string
+ argsOrder []string
+ args map[string]IEvaluator
+ exported bool
+
+ wrapper *NodeWrapper
+}
+
+func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ ctx.Private[node.name] = func(args ...*Value) *Value {
+ return node.call(ctx, args...)
+ }
+
+ return nil
+}
+
+func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
+ argsCtx := make(Context)
+
+ for k, v := range node.args {
+ if v == nil {
+ // User did not provide a default value
+ argsCtx[k] = nil
+ } else {
+ // Evaluate the default value
+ valueExpr, err := v.Evaluate(ctx)
+ if err != nil {
+ ctx.Logf(err.Error())
+ return AsSafeValue(err.Error())
+ }
+
+ argsCtx[k] = valueExpr
+ }
+ }
+
+ if len(args) > len(node.argsOrder) {
+ // Too many arguments; we ignore them and just log the error in debug mode.
+ err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
+ node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
+
+ ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
+ return AsSafeValue(err.Error())
+ }
+
+ // Make a context for the macro execution
+ macroCtx := NewChildExecutionContext(ctx)
+
+ // Register all arguments in the private context
+ macroCtx.Private.Update(argsCtx)
+
+ for idx, argValue := range args {
+ macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
+ }
+
+ var b bytes.Buffer
+ err := node.wrapper.Execute(macroCtx, &b)
+ if err != nil {
+ return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
+ }
+
+ return AsSafeValue(b.String())
+}
+
+func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ macroNode := &tagMacroNode{
+ position: start,
+ args: make(map[string]IEvaluator),
+ }
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
+ }
+ macroNode.name = nameToken.Val
+
+ if arguments.MatchOne(TokenSymbol, "(") == nil {
+ return nil, arguments.Error("Expected '('.", nil)
+ }
+
+ for arguments.Match(TokenSymbol, ")") == nil {
+ argNameToken := arguments.MatchType(TokenIdentifier)
+ if argNameToken == nil {
+ return nil, arguments.Error("Expected argument name as identifier.", nil)
+ }
+ macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)
+
+ if arguments.Match(TokenSymbol, "=") != nil {
+ // Default expression follows
+ argDefaultExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ macroNode.args[argNameToken.Val] = argDefaultExpr
+ } else {
+ // No default expression
+ macroNode.args[argNameToken.Val] = nil
+ }
+
+ if arguments.Match(TokenSymbol, ")") != nil {
+ break
+ }
+ if arguments.Match(TokenSymbol, ",") == nil {
+ return nil, arguments.Error("Expected ',' or ')'.", nil)
+ }
+ }
+
+ if arguments.Match(TokenKeyword, "export") != nil {
+ macroNode.exported = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed macro-tag.", nil)
+ }
+
+ // Body wrapping
+ wrapper, endargs, err := doc.WrapUntilTag("endmacro")
+ if err != nil {
+ return nil, err
+ }
+ macroNode.wrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if macroNode.exported {
+ // Now register the macro if it wants to be exported
+ _, has := doc.template.exportedMacros[macroNode.name]
+ if has {
+ return nil, doc.Error(fmt.Sprintf("Another macro with name '%s' already exported.", macroNode.name), start)
+ }
+ doc.template.exportedMacros[macroNode.name] = macroNode
+ }
+
+ return macroNode, nil
+}
+
+func init() {
+ RegisterTag("macro", tagMacroParser)
+}
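
A runnable sketch of defining and calling a macro in the same template. Arguments may carry default expressions, and a registered macro is invoked like a context function.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// greet() has one required and one defaulted argument. Adding the
	// "export" keyword after the argument list would make it importable
	// from other templates via {% import %}.
	tpl, err := pongo2.FromString(
		`{% macro greet(name, greeting="Hello") %}{{ greeting }}, {{ name }}!{% endmacro %}{{ greet("World") }}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello, World!
}
```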
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_now.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_now.go
new file mode 100644
index 0000000..d9fa4a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_now.go
@@ -0,0 +1,50 @@
+package pongo2
+
+import (
+ "time"
+)
+
+type tagNowNode struct {
+ position *Token
+ format string
+ fake bool
+}
+
+func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ var t time.Time
+ if node.fake {
+ t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
+ } else {
+ t = time.Now()
+ }
+
+ writer.WriteString(t.Format(node.format))
+
+ return nil
+}
+
+func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ nowNode := &tagNowNode{
+ position: start,
+ }
+
+ formatToken := arguments.MatchType(TokenString)
+ if formatToken == nil {
+ return nil, arguments.Error("Expected a format string.", nil)
+ }
+ nowNode.format = formatToken.Val
+
+ if arguments.MatchOne(TokenIdentifier, "fake") != nil {
+ nowNode.fake = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed now-tag arguments.", nil)
+ }
+
+ return nowNode, nil
+}
+
+func init() {
+ RegisterTag("now", tagNowParser)
+}
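
Note that the format string is passed straight to t.Format, so it is a Go time layout (reference time Mon Jan 2 15:04:05 MST 2006), not a Django date format. A runnable sketch; the fake flag pins the time to the fixed 2014-02-05 18:31:45 UTC date above, which makes tests reproducible.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// The "fake" flag makes the output deterministic (see tagNowNode.Execute).
	tpl, err := pongo2.FromString(`{% now "2006-01-02 15:04:05" fake %}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 2014-02-05 18:31:45
}
```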
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_set.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_set.go
new file mode 100644
index 0000000..be121c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_set.go
@@ -0,0 +1,50 @@
+package pongo2
+
+type tagSetNode struct {
+ name string
+ expression IEvaluator
+}
+
+func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ // Evaluate expression
+ value, err := node.expression.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ ctx.Private[node.name] = value
+ return nil
+}
+
+func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ node := &tagSetNode{}
+
+ // Parse variable name
+ typeToken := arguments.MatchType(TokenIdentifier)
+ if typeToken == nil {
+ return nil, arguments.Error("Expected an identifier.", nil)
+ }
+ node.name = typeToken.Val
+
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+
+ // Variable expression
+ keyExpression, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ node.expression = keyExpression
+
+ // Remaining arguments
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
+ }
+
+ return node, nil
+}
+
+func init() {
+ RegisterTag("set", tagSetParser)
+}
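
A minimal runnable sketch: set binds the evaluated expression to a name in the template's private context.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString(`{% set greeting = "Hello" %}{{ greeting }}, {{ name }}!`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"name": "pongo2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello, pongo2!
}
```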
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_spaceless.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_spaceless.go
new file mode 100644
index 0000000..4fa851b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_spaceless.go
@@ -0,0 +1,54 @@
+package pongo2
+
+import (
+ "bytes"
+ "regexp"
+)
+
+type tagSpacelessNode struct {
+ wrapper *NodeWrapper
+}
+
+var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
+
+func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+
+ err := node.wrapper.Execute(ctx, b)
+ if err != nil {
+ return err
+ }
+
+ s := b.String()
+ // Repeat this recursively
+ changed := true
+ for changed {
+ s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
+ changed = s != s2
+ s = s2
+ }
+
+ writer.WriteString(s)
+
+ return nil
+}
+
+func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ spacelessNode := &tagSpacelessNode{}
+
+ wrapper, _, err := doc.WrapUntilTag("endspaceless")
+ if err != nil {
+ return nil, err
+ }
+ spacelessNode.wrapper = wrapper
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
+ }
+
+ return spacelessNode, nil
+}
+
+func init() {
+ RegisterTag("spaceless", tagSpacelessParser)
+}
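
A runnable sketch; as the regexp above shows, only whitespace between two HTML tags is collapsed, while whitespace inside text nodes survives.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString(
		`{% spaceless %}<ul>  <li> first item </li>  </ul>{% endspaceless %}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // <ul><li> first item </li></ul>
}
```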
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ssi.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ssi.go
new file mode 100644
index 0000000..09c2325
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_ssi.go
@@ -0,0 +1,68 @@
+package pongo2
+
+import (
+ "io/ioutil"
+)
+
+type tagSSINode struct {
+ filename string
+ content string
+ template *Template
+}
+
+func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ if node.template != nil {
+ // Execute the template within the current context
+ includeCtx := make(Context)
+ includeCtx.Update(ctx.Public)
+ includeCtx.Update(ctx.Private)
+
+ err := node.template.execute(includeCtx, writer)
+ if err != nil {
+ return err.(*Error)
+ }
+ } else {
+ // Just print out the content
+ writer.WriteString(node.content)
+ }
+ return nil
+}
+
+func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ SSINode := &tagSSINode{}
+
+ if fileToken := arguments.MatchType(TokenString); fileToken != nil {
+ SSINode.filename = fileToken.Val
+
+ if arguments.Match(TokenIdentifier, "parsed") != nil {
+ // parsed
+ temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
+ if err != nil {
+ return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
+ }
+ SSINode.template = temporaryTpl
+ } else {
+ // plaintext
+ buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
+ if err != nil {
+ return nil, (&Error{
+ Sender: "tag:ssi",
+ ErrorMsg: err.Error(),
+ }).updateFromTokenIfNeeded(doc.template, fileToken)
+ }
+ SSINode.content = string(buf)
+ }
+ } else {
+ return nil, arguments.Error("First argument must be a string.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed SSI-tag argument.", nil)
+ }
+
+ return SSINode, nil
+}
+
+func init() {
+ RegisterTag("ssi", tagSSIParser)
+}
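
A sketch, not runnable without the file: disclaimer.txt is hypothetical. Unlike include, the plaintext variant reads the file once at parse time and inlines its bytes.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Without "parsed" the file content is embedded verbatim at parse time;
	// with "parsed" it is compiled and executed with the current context.
	tpl, err := pongo2.FromString(`{% ssi "disclaimer.txt" %}`)
	if err != nil {
		panic(err) // fails here if disclaimer.txt does not exist
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```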
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_templatetag.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_templatetag.go
new file mode 100644
index 0000000..164b4dc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_templatetag.go
@@ -0,0 +1,45 @@
+package pongo2
+
+type tagTemplateTagNode struct {
+ content string
+}
+
+var templateTagMapping = map[string]string{
+ "openblock": "{%",
+ "closeblock": "%}",
+ "openvariable": "{{",
+ "closevariable": "}}",
+ "openbrace": "{",
+ "closebrace": "}",
+ "opencomment": "{#",
+ "closecomment": "#}",
+}
+
+func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ writer.WriteString(node.content)
+ return nil
+}
+
+func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ttNode := &tagTemplateTagNode{}
+
+ if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
+ output, found := templateTagMapping[argToken.Val]
+ if !found {
+ return nil, arguments.Error("Argument not found", argToken)
+ }
+ ttNode.content = output
+ } else {
+ return nil, arguments.Error("Identifier expected.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
+ }
+
+ return ttNode, nil
+}
+
+func init() {
+ RegisterTag("templatetag", tagTemplateTagParser)
+}
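
A runnable sketch: templatetag emits the literal delimiter characters from the mapping above, so they survive template compilation.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString(
		`{% templatetag openvariable %} name {% templatetag closevariable %}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {{ name }}
}
```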
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_widthratio.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_widthratio.go
new file mode 100644
index 0000000..70c9c3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_widthratio.go
@@ -0,0 +1,83 @@
+package pongo2
+
+import (
+ "fmt"
+ "math"
+)
+
+type tagWidthratioNode struct {
+ position *Token
+ current, max IEvaluator
+ width IEvaluator
+ ctxName string
+}
+
+func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ current, err := node.current.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ max, err := node.max.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ width, err := node.width.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))
+
+ if node.ctxName == "" {
+ writer.WriteString(fmt.Sprintf("%d", value))
+ } else {
+ ctx.Private[node.ctxName] = value
+ }
+
+ return nil
+}
+
+func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ widthratioNode := &tagWidthratioNode{
+ position: start,
+ }
+
+ current, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratioNode.current = current
+
+ max, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratioNode.max = max
+
+ width, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratioNode.width = width
+
+ if arguments.MatchOne(TokenKeyword, "as") != nil {
+ // Name follows
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Expected name (identifier).", nil)
+ }
+ widthratioNode.ctxName = nameToken.Val
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
+ }
+
+ return widthratioNode, nil
+}
+
+func init() {
+ RegisterTag("widthratio", tagWidthratioParser)
+}
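
A runnable sketch: the tag scales current/max to the given width (here 175/200 of 100), and the optional as clause stores the result in the context instead of printing it.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// int(math.Ceil(175.0/200.0*100.0 + 0.5)) == 88
	tpl, err := pongo2.FromString(`{% widthratio 175 200 100 %}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 88
}
```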
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/tags_with.go b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_with.go
new file mode 100644
index 0000000..32b3c1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/tags_with.go
@@ -0,0 +1,88 @@
+package pongo2
+
+type tagWithNode struct {
+ withPairs map[string]IEvaluator
+ wrapper *NodeWrapper
+}
+
+func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ // New context for the block
+ withctx := NewChildExecutionContext(ctx)
+
+ // Put all custom with-pairs into the context
+ for key, value := range node.withPairs {
+ val, err := value.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ withctx.Private[key] = val
+ }
+
+ return node.wrapper.Execute(withctx, writer)
+}
+
+func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ withNode := &tagWithNode{
+ withPairs: make(map[string]IEvaluator),
+ }
+
+ if arguments.Count() == 0 {
+ return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
+ }
+
+ wrapper, endargs, err := doc.WrapUntilTag("endwith")
+ if err != nil {
+ return nil, err
+ }
+ withNode.wrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ // Scan through all arguments to see which style the user uses (old or new style).
+ // If we find any "as" keyword we will enforce old style; otherwise we will use new style.
+ oldStyle := false // by default we're using the new style
+ for i := 0; i < arguments.Count(); i++ {
+ if arguments.PeekN(i, TokenKeyword, "as") != nil {
+ oldStyle = true
+ break
+ }
+ }
+
+ for arguments.Remaining() > 0 {
+ if oldStyle {
+ valueExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ if arguments.Match(TokenKeyword, "as") == nil {
+ return nil, arguments.Error("Expected 'as' keyword.", nil)
+ }
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ withNode.withPairs[keyToken.Val] = valueExpr
+ } else {
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+ valueExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ withNode.withPairs[keyToken.Val] = valueExpr
+ }
+ }
+
+ return withNode, nil
+}
+
+func init() {
+ RegisterTag("with", tagWithParser)
+}
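
A runnable sketch covering the new key=expr style detected by the scan above; a single "as" keyword anywhere in the arguments switches the whole tag to the old Django "expr as key" style.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// New style: {% with a=1 b=2 %}; old style: {% with 1 as a %}.
	// Mixing is not possible, since one "as" enforces old style for the whole tag.
	tpl, err := pongo2.FromString(`{% with a=1 b=2 %}{{ a }}+{{ b }}{% endwith %}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 1+2
}
```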
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template.go b/Godeps/_workspace/src/github.com/flosch/pongo2/template.go
new file mode 100644
index 0000000..74bd30b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template.go
@@ -0,0 +1,193 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+type TemplateWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+}
+
+type templateWriter struct {
+ w io.Writer
+}
+
+func (tw *templateWriter) WriteString(s string) (int, error) {
+ return tw.w.Write([]byte(s))
+}
+
+func (tw *templateWriter) Write(b []byte) (int, error) {
+ return tw.w.Write(b)
+}
+
+type Template struct {
+ set *TemplateSet
+
+ // Input
+ isTplString bool
+ name string
+ tpl string
+ size int
+
+ // Calculation
+ tokens []*Token
+ parser *Parser
+
+ // first come, first served (it's important not to override existing entries here)
+ level int
+ parent *Template
+ child *Template
+ blocks map[string]*NodeWrapper
+ exportedMacros map[string]*tagMacroNode
+
+ // Output
+ root *nodeDocument
+}
+
+func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
+ return newTemplate(set, "", true, tpl)
+}
+
+func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
+ strTpl := string(tpl)
+
+ // Create the template
+ t := &Template{
+ set: set,
+ isTplString: isTplString,
+ name: name,
+ tpl: strTpl,
+ size: len(strTpl),
+ blocks: make(map[string]*NodeWrapper),
+ exportedMacros: make(map[string]*tagMacroNode),
+ }
+
+ // Tokenize it
+ tokens, err := lex(name, strTpl)
+ if err != nil {
+ return nil, err
+ }
+ t.tokens = tokens
+
+ // For debugging purposes, show all tokens:
+ /*for i, t := range tokens {
+ fmt.Printf("%3d. %s\n", i, t)
+ }*/
+
+ // Parse it
+ err = t.parse()
+ if err != nil {
+ return nil, err
+ }
+
+ return t, nil
+}
+
+func (tpl *Template) execute(context Context, writer TemplateWriter) error {
+ // Determine the parent to be executed (for template inheritance)
+ parent := tpl
+ for parent.parent != nil {
+ parent = parent.parent
+ }
+
+ // Create context if none is given
+ newContext := make(Context)
+ newContext.Update(tpl.set.Globals)
+
+ if context != nil {
+ newContext.Update(context)
+
+ if len(newContext) > 0 {
+ // Check for context name syntax
+ err := newContext.checkForValidIdentifiers()
+ if err != nil {
+ return err
+ }
+
+ // Check for clashes with macro names
+ for k := range newContext {
+ _, has := tpl.exportedMacros[k]
+ if has {
+ return &Error{
+ Filename: tpl.name,
+ Sender: "execution",
+ ErrorMsg: fmt.Sprintf("Context key name '%s' clashes with macro '%s'.", k, k),
+ }
+ }
+ }
+ }
+ }
+
+ // Create operational context
+ ctx := newExecutionContext(parent, newContext)
+
+ // Run the selected document
+ if err := parent.root.Execute(ctx, writer); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
+ return tpl.execute(context, &templateWriter{w: writer})
+}
+
+func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
+ // Create output buffer
+ // We assume that the rendered template will be 30% larger
+ buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
+ if err := tpl.execute(context, buffer); err != nil {
+ return nil, err
+ }
+ return buffer, nil
+}
+
+// Executes the template with the given context and writes to writer (io.Writer)
+// on success. Context can be nil. Nothing is written on error; instead the error
+// is returned.
+func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
+ buf, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return err
+ }
+ _, err = buf.WriteTo(writer)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Same as ExecuteWriter. The only difference between the two functions is that
+// this one might already have written parts of the generated template to the
+// writer when an execution error occurs, because no intermediate buffer is
+// involved, for performance reasons. This is handy if you need high-performance
+// template generation or if you want to manage your own pool of buffers.
+func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
+ return tpl.newTemplateWriterAndExecute(context, writer)
+}
+
+// Executes the template and returns the rendered template as a []byte
+func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
+ // Execute template
+ buffer, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return nil, err
+ }
+ return buffer.Bytes(), nil
+}
+
+// Executes the template and returns the rendered template as a string
+func (tpl *Template) Execute(context Context) (string, error) {
+ // Execute template
+ buffer, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return "", err
+ }
+
+ return buffer.String(), nil
+
+}
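
The exported entry points above compose into the usual workflow: compile once, then execute with a context. A minimal runnable sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
	if err != nil {
		panic(err)
	}

	// Execute buffers internally and returns a string ...
	out, err := tpl.Execute(pongo2.Context{"name": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello World!

	// ... while ExecuteWriter writes to the writer only after successful rendering.
	if err := tpl.ExecuteWriter(pongo2.Context{"name": "gopher"}, os.Stdout); err != nil {
		panic(err)
	}
}
```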
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_loader.go b/Godeps/_workspace/src/github.com/flosch/pongo2/template_loader.go
new file mode 100644
index 0000000..abd2340
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_loader.go
@@ -0,0 +1,156 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+)
+
+// LocalFilesystemLoader represents a local filesystem loader with basic
+// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
+type LocalFilesystemLoader struct {
+ baseDir string
+}
+
+// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
+// and panics if there's any error during instantiation. The parameters
+// are the same as for NewLocalFileSystemLoader.
+func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
+ fs, err := NewLocalFileSystemLoader(baseDir)
+ if err != nil {
+ log.Panic(err)
+ }
+ return fs
+}
+
+// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
+// templates to be loaded from disk (unrestricted). If a base directory
+// is given (or set later using SetBaseDir), it is used for path calculation
+// in template inclusions/imports. Otherwise the path is calculated relative
+// to the including template's path.
+func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
+ fs := &LocalFilesystemLoader{}
+ if baseDir != "" {
+ if err := fs.SetBaseDir(baseDir); err != nil {
+ return nil, err
+ }
+ }
+ return fs, nil
+}
+
+// SetBaseDir sets the template's base directory. This directory will
+// be used for any relative path in filters, tags and From*-functions to determine
+// your template. See the comment for NewLocalFileSystemLoader as well.
+func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
+ // Make the path absolute
+ if !filepath.IsAbs(path) {
+ abs, err := filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+ path = abs
+ }
+
+ // Check for existence
+ fi, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return fmt.Errorf("The given path '%s' is not a directory.", path)
+ }
+
+ fs.baseDir = path
+ return nil
+}
+
+// Get reads the path's content from your local filesystem.
+func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewReader(buf), nil
+}
+
+// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
+// When no base directory is set, the absolute path to the filename is
+// calculated based on either the provided base argument (which might be
+// the path of a template that includes another template) or the current
+// working directory.
+func (fs *LocalFilesystemLoader) Abs(base, name string) string {
+ if filepath.IsAbs(name) {
+ return name
+ }
+
+ // Our own base dir has always priority; if there's none
+ // we use the path provided in base.
+ var err error
+ if fs.baseDir == "" {
+ if base == "" {
+ base, err = os.Getwd()
+ if err != nil {
+ panic(err)
+ }
+ return filepath.Join(base, name)
+ }
+
+ return filepath.Join(filepath.Dir(base), name)
+ }
+
+ return filepath.Join(fs.baseDir, name)
+}
+
+// SandboxedFilesystemLoader is still WIP.
+type SandboxedFilesystemLoader struct {
+ *LocalFilesystemLoader
+}
+
+// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
+func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
+ fs, err := NewLocalFileSystemLoader(baseDir)
+ if err != nil {
+ return nil, err
+ }
+ return &SandboxedFilesystemLoader{
+ LocalFilesystemLoader: fs,
+ }, nil
+}
+
+// Move sandbox to a virtual fs
+
+/*
+if len(set.SandboxDirectories) > 0 {
+ defer func() {
+ // Remove any ".." or other crap
+ resolvedPath = filepath.Clean(resolvedPath)
+
+ // Make the path absolute
+ absPath, err := filepath.Abs(resolvedPath)
+ if err != nil {
+ panic(err)
+ }
+ resolvedPath = absPath
+
+ // Check against the sandbox directories (once one pattern matches, we're done and can allow it)
+ for _, pattern := range set.SandboxDirectories {
+ matched, err := filepath.Match(pattern, resolvedPath)
+ if err != nil {
+ panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
+ }
+ if matched {
+ // OK!
+ return
+ }
+ }
+
+ // No pattern matched, we have to log+deny the request
+ set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
+ resolvedPath = ""
+ }()
+}
+*/
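
A sketch of wiring the loader into a set; the ./templates directory and index.html file are hypothetical (MustNewLocalFileSystemLoader panics if the directory does not exist).

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// All relative template paths in this set resolve against ./templates.
	loader := pongo2.MustNewLocalFileSystemLoader("./templates")
	set := pongo2.NewSet("web", loader)

	tpl, err := set.FromFile("index.html") // hypothetical file
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"title": "Start page"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```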
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/template_sets.go b/Godeps/_workspace/src/github.com/flosch/pongo2/template_sets.go
new file mode 100644
index 0000000..bcd8a63
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/template_sets.go
@@ -0,0 +1,239 @@
+package pongo2
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "sync"
+)
+
+// TemplateLoader makes it possible to implement a virtual file system.
+type TemplateLoader interface {
+ // Abs calculates the path to a given template. Whenever a path must be resolved
+ // due to an import from another template, the base equals the parent template's path.
+ Abs(base, name string) string
+
+ // Get returns an io.Reader where the template's content can be read from.
+ Get(path string) (io.Reader, error)
+}
+
+// TemplateSet allows you to create your own group of templates with their own
+// global context (which is shared among all members of the set) and their own
+// configuration.
+// It's useful for separating different kinds of templates
+// (e.g. web templates vs. mail templates).
+type TemplateSet struct {
+ name string
+ loader TemplateLoader
+
+ // Globals will be provided to all templates created within this template set
+ Globals Context
+
+ // If debug is true (default false), ExecutionContext.Logf() will work and output
+ // to STDOUT. Furthermore, FromCache() won't cache the templates.
+ // Make sure to synchronize the access to it in case you're changing this
+ // variable during program execution (and template compilation/execution).
+ Debug bool
+
+ // Sandbox features
+ // - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
+ //
+ // For efficiency reasons you can ban tags/filters only *before* you have
+ // added your first template to the set (restrictions are statically checked).
+ // After you've added one, it's no longer possible (for your own security).
+ firstTemplateCreated bool
+ bannedTags map[string]bool
+ bannedFilters map[string]bool
+
+ // Template cache (for FromCache())
+ templateCache map[string]*Template
+ templateCacheMutex sync.Mutex
+}
+
+// NewSet can be used to create sets for different kinds of templates
+// (e.g. web vs. mail templates), with different globals or
+// other configurations.
+func NewSet(name string, loader TemplateLoader) *TemplateSet {
+ return &TemplateSet{
+ name: name,
+ loader: loader,
+ Globals: make(Context),
+ bannedTags: make(map[string]bool),
+ bannedFilters: make(map[string]bool),
+ templateCache: make(map[string]*Template),
+ }
+}
+
+func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
+ name := ""
+ if tpl != nil && tpl.isTplString {
+ return path
+ }
+ if tpl != nil {
+ name = tpl.name
+ }
+ return set.loader.Abs(name, path)
+}
+
+// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
+func (set *TemplateSet) BanTag(name string) error {
+ _, has := tags[name]
+ if !has {
+ return fmt.Errorf("Tag '%s' not found.", name)
+ }
+ if set.firstTemplateCreated {
+ return errors.New("You cannot ban any tags after you've added your first template to your template set.")
+ }
+ _, has = set.bannedTags[name]
+ if has {
+ return fmt.Errorf("Tag '%s' is already banned.", name)
+ }
+ set.bannedTags[name] = true
+
+ return nil
+}
+
+// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
+func (set *TemplateSet) BanFilter(name string) error {
+ _, has := filters[name]
+ if !has {
+ return fmt.Errorf("Filter '%s' not found.", name)
+ }
+ if set.firstTemplateCreated {
+ return errors.New("You cannot ban any filters after you've added your first template to your template set.")
+ }
+ _, has = set.bannedFilters[name]
+ if has {
+ return fmt.Errorf("Filter '%s' is already banned.", name)
+ }
+ set.bannedFilters[name] = true
+
+ return nil
+}
+
+// FromCache is a convenient method to cache templates. It is thread-safe
+// and will only compile the template associated with a filename once.
+// If TemplateSet.Debug is true (for example during development phase),
+// FromCache() will not cache the template and instead recompile it on any
+// call (to make changes to a template live instantaneously).
+func (set *TemplateSet) FromCache(filename string) (*Template, error) {
+ if set.Debug {
+ // Recompile on any request
+ return set.FromFile(filename)
+ }
+ // Cache the template
+ cleanedFilename := set.resolveFilename(nil, filename)
+
+ set.templateCacheMutex.Lock()
+ defer set.templateCacheMutex.Unlock()
+
+ tpl, has := set.templateCache[cleanedFilename]
+
+ // Cache miss
+ if !has {
+ tpl, err := set.FromFile(cleanedFilename)
+ if err != nil {
+ return nil, err
+ }
+ set.templateCache[cleanedFilename] = tpl
+ return tpl, nil
+ }
+
+ // Cache hit
+ return tpl, nil
+}
+
+// FromString loads a template from string and returns a Template instance.
+func (set *TemplateSet) FromString(tpl string) (*Template, error) {
+ set.firstTemplateCreated = true
+
+ return newTemplateString(set, []byte(tpl))
+}
+
+// FromFile loads a template from a filename and returns a Template instance.
+func (set *TemplateSet) FromFile(filename string) (*Template, error) {
+ set.firstTemplateCreated = true
+
+ fd, err := set.loader.Get(set.resolveFilename(nil, filename))
+ if err != nil {
+ return nil, &Error{
+ Filename: filename,
+ Sender: "fromfile",
+ ErrorMsg: err.Error(),
+ }
+ }
+ buf, err := ioutil.ReadAll(fd)
+ if err != nil {
+ return nil, &Error{
+ Filename: filename,
+ Sender: "fromfile",
+ ErrorMsg: err.Error(),
+ }
+ }
+
+ return newTemplate(set, filename, false, buf)
+}
+
+// RenderTemplateString is a shortcut and renders a template string directly.
+// Panics if the template is malformed or an error occurs during execution.
+func (set *TemplateSet) RenderTemplateString(s string, ctx Context) string {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromString(s))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+// RenderTemplateFile is a shortcut and renders a template file directly.
+// Panics if the template is malformed or an error occurs during execution.
+func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) string {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromFile(fn))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+func (set *TemplateSet) logf(format string, args ...interface{}) {
+ if set.Debug {
+ logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
+ }
+}
+
+// Logging function (internally used)
+func logf(format string, items ...interface{}) {
+ if debug {
+ logger.Printf(format, items...)
+ }
+}
+
+var (
+ debug bool // internal debugging
+ logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)
+
+ // DefaultLoader allows the default un-sandboxed access to the local file
+ // system and is used by the DefaultSet.
+ DefaultLoader = MustNewLocalFileSystemLoader("")
+
+ // DefaultSet is a set created for you for convenience reasons.
+ DefaultSet = NewSet("default", DefaultLoader)
+
+ // Methods on the default set
+ FromString = DefaultSet.FromString
+ FromFile = DefaultSet.FromFile
+ FromCache = DefaultSet.FromCache
+ RenderTemplateString = DefaultSet.RenderTemplateString
+ RenderTemplateFile = DefaultSet.RenderTemplateFile
+
+ // Globals for the default set
+ Globals = DefaultSet.Globals
+)
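
A sketch of the set-level sandbox and cache semantics documented above; page.html is hypothetical.

```go
package main

import "github.com/flosch/pongo2"

func main() {
	set := pongo2.NewSet("restricted", pongo2.DefaultLoader)

	// Bans are only possible before the first template enters the set.
	if err := set.BanTag("ssi"); err != nil {
		panic(err)
	}

	// FromCache compiles once; later calls are served from the cache.
	// Setting set.Debug = true during development recompiles on every call.
	if _, err := set.FromCache("page.html"); err != nil {
		panic(err) // a page.html using {% ssi %} would fail to compile here
	}
}
```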
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/value.go b/Godeps/_workspace/src/github.com/flosch/pongo2/value.go
new file mode 100644
index 0000000..b1d4f7b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/value.go
@@ -0,0 +1,517 @@
+package pongo2
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+type Value struct {
+ val reflect.Value
+ safe bool // used to indicate whether a Value needs explicit escaping in the template
+}
+
+// AsValue converts any given value to a pongo2.Value
+// Usually being used within own functions passed to a template
+// through a Context or within filter functions.
+//
+// Example:
+// AsValue("my string")
+func AsValue(i interface{}) *Value {
+ return &Value{
+ val: reflect.ValueOf(i),
+ }
+}
+
+// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
+func AsSafeValue(i interface{}) *Value {
+ return &Value{
+ val: reflect.ValueOf(i),
+ safe: true,
+ }
+}
+
+func (v *Value) getResolvedValue() reflect.Value {
+ if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
+ return v.val.Elem()
+ }
+ return v.val
+}
+
+// Checks whether the underlying value is a string
+func (v *Value) IsString() bool {
+ return v.getResolvedValue().Kind() == reflect.String
+}
+
+// Checks whether the underlying value is a bool
+func (v *Value) IsBool() bool {
+ return v.getResolvedValue().Kind() == reflect.Bool
+}
+
+// Checks whether the underlying value is a float
+func (v *Value) IsFloat() bool {
+ return v.getResolvedValue().Kind() == reflect.Float32 ||
+ v.getResolvedValue().Kind() == reflect.Float64
+}
+
+// Checks whether the underlying value is an integer
+func (v *Value) IsInteger() bool {
+ return v.getResolvedValue().Kind() == reflect.Int ||
+ v.getResolvedValue().Kind() == reflect.Int8 ||
+ v.getResolvedValue().Kind() == reflect.Int16 ||
+ v.getResolvedValue().Kind() == reflect.Int32 ||
+ v.getResolvedValue().Kind() == reflect.Int64 ||
+ v.getResolvedValue().Kind() == reflect.Uint ||
+ v.getResolvedValue().Kind() == reflect.Uint8 ||
+ v.getResolvedValue().Kind() == reflect.Uint16 ||
+ v.getResolvedValue().Kind() == reflect.Uint32 ||
+ v.getResolvedValue().Kind() == reflect.Uint64
+}
+
+// Checks whether the underlying value is either an integer
+// or a float.
+func (v *Value) IsNumber() bool {
+ return v.IsInteger() || v.IsFloat()
+}
+
+// Checks whether the underlying value is NIL
+func (v *Value) IsNil() bool {
+ //fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
+ return !v.getResolvedValue().IsValid()
+}
+
+// Returns a string for the underlying value. If this value is not
+// of type string, pongo2 tries to convert it. Currently the following
+// types for underlying values are supported:
+//
+// 1. string
+// 2. int/uint (any size)
+// 3. float (any precision)
+// 4. bool
+// 5. time.Time
+// 6. String() will be called on the underlying value if provided
+//
+// NIL values will lead to an empty string. Unsupported types lead
+// to their respective type name.
+func (v *Value) String() string {
+ if v.IsNil() {
+ return ""
+ }
+
+ switch v.getResolvedValue().Kind() {
+ case reflect.String:
+ return v.getResolvedValue().String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(v.getResolvedValue().Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
+ case reflect.Float32, reflect.Float64:
+ return fmt.Sprintf("%f", v.getResolvedValue().Float())
+ case reflect.Bool:
+ if v.Bool() {
+ return "True"
+ }
+ return "False"
+ case reflect.Struct:
+ if t, ok := v.Interface().(fmt.Stringer); ok {
+ return t.String()
+ }
+ }
+
+ logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
+ return v.getResolvedValue().String()
+}
+
+// Returns the underlying value as an integer (converts the underlying
+// value, if necessary). If it's not possible to convert the underlying value,
+// it will return 0.
+func (v *Value) Integer() int {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return int(v.getResolvedValue().Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int(v.getResolvedValue().Uint())
+ case reflect.Float32, reflect.Float64:
+ return int(v.getResolvedValue().Float())
+ case reflect.String:
+ // Try to convert from string to int (base 10)
+ f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
+ if err != nil {
+ return 0
+ }
+ return int(f)
+ default:
+ logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0
+ }
+}
+
+// Returns the underlying value as a float (converts the underlying
+// value, if necessary). If it's not possible to convert the underlying value,
+// it will return 0.0.
+func (v *Value) Float() float64 {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.getResolvedValue().Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return float64(v.getResolvedValue().Uint())
+ case reflect.Float32, reflect.Float64:
+ return v.getResolvedValue().Float()
+ case reflect.String:
+ // Try to convert from string to float64 (base 10)
+ f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
+ if err != nil {
+ return 0.0
+ }
+ return f
+ default:
+ logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0.0
+ }
+}
+
+// Returns the underlying value as bool. If the value is not bool, false
+// will always be returned. If you're looking for true/false-evaluation of the
+// underlying value, have a look at the IsTrue() function.
+func (v *Value) Bool() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Bool:
+ return v.getResolvedValue().Bool()
+ default:
+ logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// Tries to evaluate the underlying value the Pythonic-way:
+//
+// Returns TRUE in one of the following cases:
+//
+// * int != 0
+// * uint != 0
+// * float != 0.0
+// * len(array/chan/map/slice/string) > 0
+// * bool == true
+// * underlying value is a struct
+//
+// Otherwise returns always FALSE.
+func (v *Value) IsTrue() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.getResolvedValue().Int() != 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return v.getResolvedValue().Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return v.getResolvedValue().Float() != 0
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return v.getResolvedValue().Len() > 0
+ case reflect.Bool:
+ return v.getResolvedValue().Bool()
+ case reflect.Struct:
+ return true // struct instance is always true
+ default:
+ logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// Tries to negate the underlying value. It's mainly used for
+// the NOT-operator and in conjunction with a call to
+// return_value.IsTrue() afterwards.
+//
+// Example:
+// AsValue(1).Negate().IsTrue() == false
+func (v *Value) Negate() *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if v.Integer() != 0 {
+ return AsValue(0)
+ }
+ return AsValue(1)
+ case reflect.Float32, reflect.Float64:
+ if v.Float() != 0.0 {
+ return AsValue(float64(0.0))
+ }
+ return AsValue(float64(1.0))
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return AsValue(v.getResolvedValue().Len() == 0)
+ case reflect.Bool:
+ return AsValue(!v.getResolvedValue().Bool())
+ default:
+ logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue(true)
+ }
+}
+
+// Returns the length for an array, chan, map, slice or string.
+// Otherwise it will return 0.
+func (v *Value) Len() int {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return v.getResolvedValue().Len()
+ case reflect.String:
+ runes := []rune(v.getResolvedValue().String())
+ return len(runes)
+ default:
+ logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0
+ }
+}
+
+// Slices an array, slice or string. Otherwise it will
+// return an empty []int.
+func (v *Value) Slice(i, j int) *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice:
+ return AsValue(v.getResolvedValue().Slice(i, j).Interface())
+ case reflect.String:
+ runes := []rune(v.getResolvedValue().String())
+ return AsValue(string(runes[i:j]))
+ default:
+ logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue([]int{})
+ }
+}
+
+// Get the i-th item of an array, slice or string. Otherwise
+// it will return NIL.
+func (v *Value) Index(i int) *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice:
+ if i >= v.Len() {
+ return AsValue(nil)
+ }
+ return AsValue(v.getResolvedValue().Index(i).Interface())
+ case reflect.String:
+ //return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
+ s := v.getResolvedValue().String()
+ runes := []rune(s)
+ if i < len(runes) {
+ return AsValue(string(runes[i]))
+ }
+ return AsValue("")
+ default:
+ logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue([]int{})
+ }
+}
+
+// Contains checks whether the underlying value (which must be of type struct, map,
+// string, array or slice) contains another Value (e.g. used to check
+// whether a struct contains a specific field or a map contains a specific key).
+//
+// Example:
+// AsValue("Hello, World!").Contains(AsValue("World")) == true
+func (v *Value) Contains(other *Value) bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Struct:
+ fieldValue := v.getResolvedValue().FieldByName(other.String())
+ return fieldValue.IsValid()
+ case reflect.Map:
+ var mapValue reflect.Value
+ switch other.Interface().(type) {
+ case int:
+ mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
+ case string:
+ mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
+ default:
+ logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
+ return false
+ }
+
+ return mapValue.IsValid()
+ case reflect.String:
+ return strings.Contains(v.getResolvedValue().String(), other.String())
+
+ case reflect.Slice, reflect.Array:
+ for i := 0; i < v.getResolvedValue().Len(); i++ {
+ item := v.getResolvedValue().Index(i)
+ if other.Interface() == item.Interface() {
+ return true
+ }
+ }
+ return false
+
+ default:
+ logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// Checks whether the underlying value is of type array, slice or string.
+// You normally would use CanSlice() before using the Slice() operation.
+func (v *Value) CanSlice() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice, reflect.String:
+ return true
+ }
+ return false
+}
+
+// Iterates over a map, array, slice or a string. It calls the
+// function's first argument for every value with the following arguments:
+//
+// idx current 0-index
+// count total amount of items
+// key *Value for the key or item
+// value *Value (only for maps, the respective value for a specific key)
+//
+// If the underlying value has no items or is not one of the types above,
+// the empty function (function's second argument) will be called.
+func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
+ v.IterateOrder(fn, empty, false, false)
+}
+
+// Like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
+// not affect the iteration through a map because maps don't have any particular order.
+func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Map:
+ keys := sortedKeys(v.getResolvedValue().MapKeys())
+ if sorted {
+ if reverse {
+ sort.Sort(sort.Reverse(keys))
+ } else {
+ sort.Sort(keys)
+ }
+ }
+ keyLen := len(keys)
+ for idx, key := range keys {
+ value := v.getResolvedValue().MapIndex(key)
+ if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
+ return
+ }
+ }
+ if keyLen == 0 {
+ empty()
+ }
+ return // done
+ case reflect.Array, reflect.Slice:
+ var items valuesList
+
+ itemCount := v.getResolvedValue().Len()
+ for i := 0; i < itemCount; i++ {
+ items = append(items, &Value{val: v.getResolvedValue().Index(i)})
+ }
+
+ if sorted {
+ if reverse {
+ sort.Sort(sort.Reverse(items))
+ } else {
+ sort.Sort(items)
+ }
+ } else {
+ if reverse {
+ for i := 0; i < itemCount/2; i++ {
+ items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
+ }
+ }
+ }
+
+ if len(items) > 0 {
+ for idx, item := range items {
+ if !fn(idx, itemCount, item, nil) {
+ return
+ }
+ }
+ } else {
+ empty()
+ }
+ return // done
+ case reflect.String:
+ if sorted {
+ // TODO(flosch): Handle sorted
+ panic("TODO: handle sort for type string")
+ }
+
+ // TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
+ charCount := v.getResolvedValue().Len()
+ if charCount > 0 {
+ if reverse {
+ for i := charCount - 1; i >= 0; i-- {
+ if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
+ return
+ }
+ }
+ } else {
+ for i := 0; i < charCount; i++ {
+ if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
+ return
+ }
+ }
+ }
+ } else {
+ empty()
+ }
+ return // done
+ default:
+ logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ }
+ empty()
+}
+
+// Gives you access to the underlying value.
+func (v *Value) Interface() interface{} {
+ if v.val.IsValid() {
+ return v.val.Interface()
+ }
+ return nil
+}
+
+// Checks whether two values contain the same value or object.
+func (v *Value) EqualValueTo(other *Value) bool {
+ // comparison of uint with int fails using .Interface()-comparison (see issue #64)
+ if v.IsInteger() && other.IsInteger() {
+ return v.Integer() == other.Integer()
+ }
+ return v.Interface() == other.Interface()
+}
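+
+// For example (illustrative): AsValue(uint8(5)).EqualValueTo(AsValue(5)) is
+// true, because both sides are compared via Integer() rather than Interface().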
+
+type sortedKeys []reflect.Value
+
+func (sk sortedKeys) Len() int {
+ return len(sk)
+}
+
+func (sk sortedKeys) Less(i, j int) bool {
+ vi := &Value{val: sk[i]}
+ vj := &Value{val: sk[j]}
+ switch {
+ case vi.IsInteger() && vj.IsInteger():
+ return vi.Integer() < vj.Integer()
+ case vi.IsFloat() && vj.IsFloat():
+ return vi.Float() < vj.Float()
+ default:
+ return vi.String() < vj.String()
+ }
+}
+
+func (sk sortedKeys) Swap(i, j int) {
+ sk[i], sk[j] = sk[j], sk[i]
+}
+
+type valuesList []*Value
+
+func (vl valuesList) Len() int {
+ return len(vl)
+}
+
+func (vl valuesList) Less(i, j int) bool {
+ vi := vl[i]
+ vj := vl[j]
+ switch {
+ case vi.IsInteger() && vj.IsInteger():
+ return vi.Integer() < vj.Integer()
+ case vi.IsFloat() && vj.IsFloat():
+ return vi.Float() < vj.Float()
+ default:
+ return vi.String() < vj.String()
+ }
+}
+
+func (vl valuesList) Swap(i, j int) {
+ vl[i], vl[j] = vl[j], vl[i]
+}
diff --git a/Godeps/_workspace/src/github.com/flosch/pongo2/variable.go b/Godeps/_workspace/src/github.com/flosch/pongo2/variable.go
new file mode 100644
index 0000000..6dd6dc4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/flosch/pongo2/variable.go
@@ -0,0 +1,662 @@
+package pongo2
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ varTypeInt = iota
+ varTypeIdent
+)
+
+type variablePart struct {
+ typ int
+ s string
+ i int
+
+ isFunctionCall bool
+ callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
+}
+
+type functionCallArgument interface {
+ Evaluate(*ExecutionContext) (*Value, *Error)
+}
+
+// TODO: Add location tokens
+type stringResolver struct {
+ locationToken *Token
+ val string
+}
+
+type intResolver struct {
+ locationToken *Token
+ val int
+}
+
+type floatResolver struct {
+ locationToken *Token
+ val float64
+}
+
+type boolResolver struct {
+ locationToken *Token
+ val bool
+}
+
+type variableResolver struct {
+ locationToken *Token
+
+ parts []*variablePart
+}
+
+type nodeFilteredVariable struct {
+ locationToken *Token
+
+ resolver IEvaluator
+ filterChain []*filterCall
+}
+
+type nodeVariable struct {
+ locationToken *Token
+ expr IEvaluator
+}
+
+func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := v.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := vr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := s.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := i.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := f.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := b.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (v *nodeFilteredVariable) GetPositionToken() *Token {
+ return v.locationToken
+}
+
+func (vr *variableResolver) GetPositionToken() *Token {
+ return vr.locationToken
+}
+
+func (s *stringResolver) GetPositionToken() *Token {
+ return s.locationToken
+}
+
+func (i *intResolver) GetPositionToken() *Token {
+ return i.locationToken
+}
+
+func (f *floatResolver) GetPositionToken() *Token {
+ return f.locationToken
+}
+
+func (b *boolResolver) GetPositionToken() *Token {
+ return b.locationToken
+}
+
+func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(s.val), nil
+}
+
+func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(i.val), nil
+}
+
+func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(f.val), nil
+}
+
+func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(b.val), nil
+}
+
+func (s *stringResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (i *intResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (f *floatResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (b *boolResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (nv *nodeVariable) FilterApplied(name string) bool {
+ return nv.expr.FilterApplied(name)
+}
+
+func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := nv.expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
+ // apply escape filter
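+ // (illustrative note: with Autoescape enabled, {{ "<b>" }} renders as
+ // "&lt;b&gt;" here unless the "safe" filter was applied)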
+ value, err = filters["escape"](value, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (vr *variableResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (vr *variableResolver) String() string {
+ parts := make([]string, 0, len(vr.parts))
+ for _, p := range vr.parts {
+ switch p.typ {
+ case varTypeInt:
+ parts = append(parts, strconv.Itoa(p.i))
+ case varTypeIdent:
+ parts = append(parts, p.s)
+ default:
+ panic("unimplemented")
+ }
+ }
+ return strings.Join(parts, ".")
+}
+
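+// resolve walks the dotted parts of a variable. For example (illustrative),
+// "user.Name" first looks up "user" in the private and then the public
+// context, and then resolves "Name" as a method, struct field or map key
+// on the intermediate result.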
+func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
+ var current reflect.Value
+ var isSafe bool
+
+ for idx, part := range vr.parts {
+ if idx == 0 {
+ // We're looking up the first part of the variable.
+ // First we have a look in our private
+ // context (e.g. information provided by tags, like the forloop).
+ val, inPrivate := ctx.Private[vr.parts[0].s]
+ if !inPrivate {
+ // Nothing found? Then have a final lookup in the public context
+ val = ctx.Public[vr.parts[0].s]
+ }
+ current = reflect.ValueOf(val) // Get the initial value
+ } else {
+ // Next parts, resolve it from current
+
+ // Before resolving the pointer, let's see if we have a method to call
+ // Problem with resolving the pointer is we're changing the receiver
+ isFunc := false
+ if part.typ == varTypeIdent {
+ funcValue := current.MethodByName(part.s)
+ if funcValue.IsValid() {
+ current = funcValue
+ isFunc = true
+ }
+ }
+
+ if !isFunc {
+ // If current a pointer, resolve it
+ if current.Kind() == reflect.Ptr {
+ current = current.Elem()
+ if !current.IsValid() {
+ // Value is not valid (anymore)
+ return AsValue(nil), nil
+ }
+ }
+
+ // Look up which part must be called now
+ switch part.typ {
+ case varTypeInt:
+ // Calling an index is only possible for:
+ // * slices/arrays/strings
+ switch current.Kind() {
+ case reflect.String, reflect.Array, reflect.Slice:
+ if current.Len() > part.i {
+ current = current.Index(part.i)
+ } else {
+ return nil, fmt.Errorf("Index out of range: %d (variable %s)", part.i, vr.String())
+ }
+ default:
+ return nil, fmt.Errorf("Can't access an index on type %s (variable %s)",
+ current.Kind().String(), vr.String())
+ }
+ case varTypeIdent:
+ // debugging:
+ // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
+
+ // Calling a field or key
+ switch current.Kind() {
+ case reflect.Struct:
+ current = current.FieldByName(part.s)
+ case reflect.Map:
+ current = current.MapIndex(reflect.ValueOf(part.s))
+ default:
+ return nil, fmt.Errorf("Can't access a field by name on type %s (variable %s)",
+ current.Kind().String(), vr.String())
+ }
+ default:
+ panic("unimplemented")
+ }
+ }
+ }
+
+ if !current.IsValid() {
+ // Value is not valid (anymore)
+ return AsValue(nil), nil
+ }
+
+ // If current is a reflect.ValueOf(pongo2.Value), then unpack it
+ // Happens in function calls (as a return value) or by injecting
+ // into the execution context (e.g. in a for-loop)
+ if current.Type() == reflect.TypeOf(&Value{}) {
+ tmpValue := current.Interface().(*Value)
+ current = tmpValue.val
+ isSafe = tmpValue.safe
+ }
+
+ // Check whether this is an interface and resolve it where required
+ if current.Kind() == reflect.Interface {
+ current = reflect.ValueOf(current.Interface())
+ }
+
+ // Check if the part is a function call
+ if part.isFunctionCall || current.Kind() == reflect.Func {
+ // Check for callable
+ if current.Kind() != reflect.Func {
+ return nil, fmt.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
+ }
+
+ // Check for correct function syntax and types
+ // func(*Value, ...) *Value
+ t := current.Type()
+
+ // Input arguments
+ if len(part.callingArgs) != t.NumIn() && !(len(part.callingArgs) >= t.NumIn()-1 && t.IsVariadic()) {
+ return nil,
+ fmt.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).",
+ t.NumIn(), vr.String(), len(part.callingArgs))
+ }
+
+ // Output arguments
+ if t.NumOut() != 1 {
+ return nil, fmt.Errorf("'%s' must have exactly 1 output argument", vr.String())
+ }
+
+ // Evaluate all parameters
+ var parameters []reflect.Value
+
+ numArgs := t.NumIn()
+ isVariadic := t.IsVariadic()
+ var fnArg reflect.Type
+
+ for idx, arg := range part.callingArgs {
+ pv, err := arg.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if isVariadic {
+ if idx >= t.NumIn()-1 {
+ fnArg = t.In(numArgs - 1).Elem()
+ } else {
+ fnArg = t.In(idx)
+ }
+ } else {
+ fnArg = t.In(idx)
+ }
+
+ if fnArg != reflect.TypeOf(new(Value)) {
+ // The function's argument is not a *pongo2.Value, so we have to check whether the input argument matches the function's argument type
+ if !isVariadic {
+ if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
+ return nil, fmt.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).",
+ idx, vr.String(), fnArg.String(), pv.Interface())
+ }
+ // Function's argument has another type, using the interface-value
+ parameters = append(parameters, reflect.ValueOf(pv.Interface()))
+ } else {
+ if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
+ return nil, fmt.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).",
+ vr.String(), fnArg.String(), pv.Interface())
+ }
+ // Function's argument has another type, using the interface-value
+ parameters = append(parameters, reflect.ValueOf(pv.Interface()))
+ }
+ } else {
+ // Function's argument is a *pongo2.Value
+ parameters = append(parameters, reflect.ValueOf(pv))
+ }
+ }
+
+ // Check if any of the values are invalid
+ for _, p := range parameters {
+ if p.Kind() == reflect.Invalid {
+ return nil, fmt.Errorf("Calling a function using an invalid parameter")
+ }
+ }
+
+ // Call it and get first return parameter back
+ rv := current.Call(parameters)[0]
+
+ if rv.Type() != reflect.TypeOf(new(Value)) {
+ current = reflect.ValueOf(rv.Interface())
+ } else {
+ // Return the function call value
+ current = rv.Interface().(*Value).val
+ isSafe = rv.Interface().(*Value).safe
+ }
+ }
+
+ if !current.IsValid() {
+ // Value is not valid (e.g. NIL value)
+ return AsValue(nil), nil
+ }
+ }
+
+ return &Value{val: current, safe: isSafe}, nil
+}
+
+func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ value, err := vr.resolve(ctx)
+ if err != nil {
+ return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
+ }
+ return value, nil
+}
+
+func (v *nodeFilteredVariable) FilterApplied(name string) bool {
+ for _, filter := range v.filterChain {
+ if filter.name == name {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ value, err := v.resolver.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, filter := range v.filterChain {
+ value, err = filter.Execute(value, ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return value, nil
+}
+
+// IDENT | IDENT.(IDENT|NUMBER)...
+func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
+ t := p.Current()
+
+ if t == nil {
+ return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
+ }
+
+ // If the first part is a number or a string, there is nothing to resolve; we simply return the value.
+ switch t.Typ {
+ case TokenNumber:
+ p.Consume()
+
+ // One exception to the rule that we don't have float64 literals is at the beginning
+ // of an expression (or a variable name). Since we know we started with an integer,
+ // which obviously can't be a variable name, we can check whether the first number
+ // is followed by a dot (and then a number again). If so, we convert it to a float64.
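+ // For example (illustrative), "3.14" arrives here as NUMBER(3) DOT NUMBER(14)
+ // and is reassembled below into the float64 literal 3.14.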
+
+ if p.Match(TokenSymbol, ".") != nil {
+ // float64
+ t2 := p.MatchType(TokenNumber)
+ if t2 == nil {
+ return nil, p.Error("Expected a number after the '.'.", nil)
+ }
+ f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
+ if err != nil {
+ return nil, p.Error(err.Error(), t)
+ }
+ fr := &floatResolver{
+ locationToken: t,
+ val: f,
+ }
+ return fr, nil
+ }
+ i, err := strconv.Atoi(t.Val)
+ if err != nil {
+ return nil, p.Error(err.Error(), t)
+ }
+ nr := &intResolver{
+ locationToken: t,
+ val: i,
+ }
+ return nr, nil
+
+ case TokenString:
+ p.Consume()
+ sr := &stringResolver{
+ locationToken: t,
+ val: t.Val,
+ }
+ return sr, nil
+ case TokenKeyword:
+ p.Consume()
+ switch t.Val {
+ case "true":
+ br := &boolResolver{
+ locationToken: t,
+ val: true,
+ }
+ return br, nil
+ case "false":
+ br := &boolResolver{
+ locationToken: t,
+ val: false,
+ }
+ return br, nil
+ default:
+ return nil, p.Error("This keyword is not allowed here.", nil)
+ }
+ }
+
+ resolver := &variableResolver{
+ locationToken: t,
+ }
+
+ // First part of a variable MUST be an identifier
+ if t.Typ != TokenIdentifier {
+ return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
+ }
+
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeIdent,
+ s: t.Val,
+ })
+
+ p.Consume() // we consumed the first identifier of the variable name
+
+variableLoop:
+ for p.Remaining() > 0 {
+ t = p.Current()
+
+ if p.Match(TokenSymbol, ".") != nil {
+ // Next variable part (can be either NUMBER or IDENT)
+ t2 := p.Current()
+ if t2 != nil {
+ switch t2.Typ {
+ case TokenIdentifier:
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeIdent,
+ s: t2.Val,
+ })
+ p.Consume() // consume: IDENT
+ continue variableLoop
+ case TokenNumber:
+ i, err := strconv.Atoi(t2.Val)
+ if err != nil {
+ return nil, p.Error(err.Error(), t2)
+ }
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeInt,
+ i: i,
+ })
+ p.Consume() // consume: NUMBER
+ continue variableLoop
+ default:
+ return nil, p.Error("This token is not allowed within a variable name.", t2)
+ }
+ } else {
+ // EOF
+ return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
+ p.lastToken)
+ }
+ } else if p.Match(TokenSymbol, "(") != nil {
+ // Function call
+ // FunctionName '(' Comma-separated list of expressions ')'
+ part := resolver.parts[len(resolver.parts)-1]
+ part.isFunctionCall = true
+ argumentLoop:
+ for {
+ if p.Remaining() == 0 {
+ return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
+ }
+
+ if p.Peek(TokenSymbol, ")") == nil {
+ // No closing bracket, so we're parsing an expression
+ exprArg, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ part.callingArgs = append(part.callingArgs, exprArg)
+
+ if p.Match(TokenSymbol, ")") != nil {
+ // If there's a closing bracket after an expression, we will stop parsing the arguments
+ break argumentLoop
+ } else {
+ // If there's NO closing bracket, there MUST be a comma
+ if p.Match(TokenSymbol, ",") == nil {
+ return nil, p.Error("Missing comma or closing bracket after argument.", nil)
+ }
+ }
+ } else {
+ // We got a closing bracket, so stop parsing arguments
+ p.Consume()
+ break argumentLoop
+ }
+
+ }
+ // We're done parsing the function call, next variable part
+ continue variableLoop
+ }
+
+ // No dot or function call? Then we're done with the variable parsing
+ break
+ }
+
+ return resolver, nil
+}
+
+func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
+ v := &nodeFilteredVariable{
+ locationToken: p.Current(),
+ }
+
+ // Parse the variable name
+ resolver, err := p.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ v.resolver = resolver
+
+ // Parse all the filters
+filterLoop:
+ for p.Match(TokenSymbol, "|") != nil {
+ // Parse one single filter
+ filter, err := p.parseFilter()
+ if err != nil {
+ return nil, err
+ }
+
+ // Check sandbox filter restriction
+ if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
+ return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
+ }
+
+ v.filterChain = append(v.filterChain, filter)
+
+ continue filterLoop
+ }
+
+ return v, nil
+}
+
+func (p *Parser) parseVariableElement() (INode, *Error) {
+ node := &nodeVariable{
+ locationToken: p.Current(),
+ }
+
+ p.Consume() // consume '{{'
+
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ node.expr = expr
+
+ if p.Match(TokenSymbol, "}}") == nil {
+ return nil, p.Error("'}}' expected", nil)
+ }
+
+ return node, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore
new file mode 100644
index 0000000..5f6b48e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore
@@ -0,0 +1,2 @@
+# temporary symlink for testing
+testing/data/symlink
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
new file mode 100644
index 0000000..d062464
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+sudo: required
+go:
+ - 1.3.3
+ - 1.4.2
+ - 1.5.1
+ - tip
+env:
+ - GOARCH=amd64
+ - GOARCH=386
+script:
+ - make test
+ - DOCKER_HOST=tcp://127.0.0.1:2375 make integration
+services:
+ - docker
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
new file mode 100644
index 0000000..b6c3d80
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
@@ -0,0 +1,106 @@
+# This is the official list of go-dockerclient authors for copyright purposes.
+
+Abhishek Chanda
+Adam Bell-Hanssen
+Adrien Kohlbecker
+Aldrin Leal
+Andreas Jaekle
+Andrews Medina
+Artem Sidorenko
+Andy Goldstein
+Ben Marini
+Ben McCann
+Brendan Fosberry
+Brian Lalor
+Brian Palmer
+Bryan Boreham
+Burke Libbey
+Carlos Diaz-Padron
+Cesar Wong
+Cezar Sa Espinola
+Cheah Chu Yeow
+cheneydeng
+Chris Bednarski
+CMGS
+Craig Jellick
+Dan Williams
+Daniel, Dao Quang Minh
+Daniel Garcia
+Darren Shepherd
+Dave Choi
+David Huie
+Dawn Chen
+Dinesh Subhraveti
+Ed
+Elias G. Schneevoigt
+Erez Horev
+Eric Anderson
+Ewout Prangsma
+Fabio Rehm
+Fatih Arslan
+Flavia Missi
+Francisco Souza
+Grégoire Delattre
+Guillermo Álvarez Fernández
+He Simei
+Ivan Mikushin
+James Bardin
+Jari Kolehmainen
+Jason Wilder
+Jawher Moussa
+Jean-Baptiste Dalido
+Jeff Mitchell
+Jeffrey Hulten
+Jen Andre
+Johan Euphrosine
+Kamil Domanski
+Karan Misra
+Kim, Hirokuni
+Kyle Allan
+Liron Levin
+Liu Peng
+Lorenz Leutgeb
+Lucas Clemente
+Lucas Weiblen
+Mantas Matelis
+Martin Sweeney
+Máximo Cuadros Ortiz
+Michal Fojtik
+Mike Dillon
+Mrunal Patel
+Nick Ethier
+Omeid Matten
+Orivej Desh
+Paul Bellamy
+Paul Morie
+Paul Weil
+Peter Edge
+Peter Jihoon Kim
+Phil Lu
+Philippe Lafoucrière
+Rafe Colton
+Rob Miller
+Robert Williamson
+Salvador Gironès
+Sam Rijs
+Sami Wagiaalla
+Samuel Karp
+Silas Sewell
+Simon Eskildsen
+Simon Menke
+Skolos
+Soulou
+Sridhar Ratnakumar
+Summer Mousa
+Sunjin Lee
+Tarsis Azevedo
+Tim Schindler
+Tobi Knaup
+Tonic
+ttyh061
+Victor Marmol
+Vincenzo Prignano
+Wiliam Souza
+Ye Yin
+Yu, Zou
+Yuriy Bogdanov
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
new file mode 100644
index 0000000..7066344
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
@@ -0,0 +1,6 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+You can find the Docker license at the following link:
+https://raw.githubusercontent.com/docker/docker/master/LICENSE
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE
new file mode 100644
index 0000000..4e11de1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2015, go-dockerclient authors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile
new file mode 100644
index 0000000..4d5d840
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile
@@ -0,0 +1,51 @@
+.PHONY: \
+ all \
+ vendor \
+ lint \
+ vet \
+ fmt \
+ fmtcheck \
+ pretest \
+ test \
+ integration \
+ cov \
+ clean
+
+SRCS = $(shell git ls-files '*.go' | grep -v '^external/')
+PKGS = ./. ./testing
+
+all: test
+
+vendor:
+ @ go get -v github.com/mjibson/party
+ party -d external -c -u
+
+lint:
+ @ go get -v github.com/golang/lint/golint
+ $(foreach file,$(SRCS),golint $(file) || exit;)
+
+vet:
+ @-go get -v golang.org/x/tools/cmd/vet
+ $(foreach pkg,$(PKGS),go vet $(pkg);)
+
+fmt:
+ gofmt -w $(SRCS)
+
+fmtcheck:
+ $(foreach file,$(SRCS),gofmt -d $(file);)
+
+pretest: lint vet fmtcheck
+
+test: pretest
+ $(foreach pkg,$(PKGS),go test $(pkg) || exit;)
+
+integration:
+ go test -tags docker_integration -run TestIntegration -v
+
+cov:
+ @ go get -v github.com/axw/gocov/gocov
+ @ go get golang.org/x/tools/cmd/cover
+ gocov test | gocov report
+
+clean:
+ $(foreach pkg,$(PKGS),go clean $(pkg) || exit;)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
new file mode 100644
index 0000000..9f3c0ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
@@ -0,0 +1,106 @@
+# go-dockerclient
+
+[![Drone](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest)
+[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient)
+[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
+
+This package presents a client for the Docker remote API. It also provides
+support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/).
+
+This package also provides support for docker's network API, which is a simple
+passthrough to the libnetwork remote API. Note that docker's network API is
+only available in docker 1.8 and above, and only enabled in docker if
+DOCKER_EXPERIMENTAL is defined during the docker build process.
+
+For more details, check the [remote API documentation](http://docs.docker.com/en/latest/reference/api/docker_remote_api/).
+
+## Vendoring
+
+If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored,
+please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient
+is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339)
+for details.
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ endpoint := "unix:///var/run/docker.sock"
+ client, _ := docker.NewClient(endpoint)
+ imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
+ for _, img := range imgs {
+ fmt.Println("ID: ", img.ID)
+ fmt.Println("RepoTags: ", img.RepoTags)
+ fmt.Println("Created: ", img.Created)
+ fmt.Println("Size: ", img.Size)
+ fmt.Println("VirtualSize: ", img.VirtualSize)
+ fmt.Println("ParentId: ", img.ParentID)
+ }
+}
+```
+
+## Using with TLS
+
+In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and the paths to the key and certificates as parameters.
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ endpoint := "tcp://[ip]:[port]"
+ path := os.Getenv("DOCKER_CERT_PATH")
+ ca := fmt.Sprintf("%s/ca.pem", path)
+ cert := fmt.Sprintf("%s/cert.pem", path)
+ key := fmt.Sprintf("%s/key.pem", path)
+ client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
+ // use client, e.g.:
+ fmt.Println(client.Endpoint())
+}
+```
+
+If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables
+`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv.
+
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ client, _ := docker.NewClientFromEnv()
+ // use client, e.g.:
+ fmt.Println(client.Endpoint())
+}
+```
+
+See the documentation for more details.
+
+## Developing
+
+All development commands can be seen in the [Makefile](Makefile).
+
+Committed code must pass:
+
+* [golint](https://github.com/golang/lint)
+* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
+* [gofmt](https://golang.org/cmd/gofmt)
+* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
+
+Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go
new file mode 100644
index 0000000..30e3af3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go
@@ -0,0 +1,136 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+)
+
+// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
+var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
+
+// AuthConfiguration represents authentication options to use in the PushImage
+// method. It holds the credentials used to authenticate against the Docker index server.
+type AuthConfiguration struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Email string `json:"email,omitempty"`
+ ServerAddress string `json:"serveraddress,omitempty"`
+}
+
+// AuthConfigurations represents authentication options to use for the
+// PushImage method, accommodating the new X-Registry-Config header.
+type AuthConfigurations struct {
+ Configs map[string]AuthConfiguration `json:"configs"`
+}
+
+// AuthConfigurations119 is used to serialize a set of AuthConfigurations
+// for Docker API >= 1.19.
+type AuthConfigurations119 map[string]AuthConfiguration
+
+// dockerConfig represents a registry authentication configuration from the
+// .dockercfg file.
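+//
+// A .dockercfg file maps registry URLs to such entries; "auth" holds
+// base64("username:password"). An illustrative (not real) example:
+//
+//     {"https://index.docker.io/v1/": {"auth": "dXNlcjpzZWNyZXQ=", "email": "user@example.com"}}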
+type dockerConfig struct {
+ Auth string `json:"auth"`
+ Email string `json:"email"`
+}
+
+// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
+// ~/.docker/config.json file, falling back to the legacy ~/.dockercfg file.
+func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
+ var r io.Reader
+ var err error
+ p := path.Join(os.Getenv("HOME"), ".docker", "config.json")
+ r, err = os.Open(p)
+ if err != nil {
+ p := path.Join(os.Getenv("HOME"), ".dockercfg")
+ r, err = os.Open(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return NewAuthConfigurations(r)
+}
+
+// NewAuthConfigurations returns AuthConfigurations from JSON-encoded data read
+// from r, in the same format as the .dockercfg file.
+func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
+ var auth *AuthConfigurations
+ confs, err := parseDockerConfig(r)
+ if err != nil {
+ return nil, err
+ }
+ auth, err = authConfigs(confs)
+ if err != nil {
+ return nil, err
+ }
+ return auth, nil
+}
+
+func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(r)
+ byteData := buf.Bytes()
+
+ var confsWrapper map[string]map[string]dockerConfig
+ if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
+ if confs, ok := confsWrapper["auths"]; ok {
+ return confs, nil
+ }
+ }
+
+ var confs map[string]dockerConfig
+ if err := json.Unmarshal(byteData, &confs); err != nil {
+ return nil, err
+ }
+ return confs, nil
+}
+
+// authConfigs converts a dockerConfig map to an AuthConfigurations object.
+func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
+ c := &AuthConfigurations{
+ Configs: make(map[string]AuthConfiguration),
+ }
+ for reg, conf := range confs {
+ data, err := base64.StdEncoding.DecodeString(conf.Auth)
+ if err != nil {
+ return nil, err
+ }
+ userpass := strings.Split(string(data), ":")
+ if len(userpass) != 2 {
+ return nil, ErrCannotParseDockercfg
+ }
+ c.Configs[reg] = AuthConfiguration{
+ Email: conf.Email,
+ Username: userpass[0],
+ Password: userpass[1],
+ ServerAddress: reg,
+ }
+ }
+ return c, nil
+}
+
+// AuthCheck validates the given credentials. It returns nil if successful.
+//
+// See https://goo.gl/m2SleN for more details.
+func (c *Client) AuthCheck(conf *AuthConfiguration) error {
+ if conf == nil {
+ return fmt.Errorf("conf is nil")
+ }
+ resp, err := c.do("POST", "/auth", doOptions{data: conf})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go
new file mode 100644
index 0000000..d133594
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go
@@ -0,0 +1,43 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import "fmt"
+
+// ChangeType is a type for constants indicating the type of change
+// in a container
+type ChangeType int
+
+const (
+ // ChangeModify is the ChangeType for container modifications
+ ChangeModify ChangeType = iota
+
+ // ChangeAdd is the ChangeType for additions to a container
+ ChangeAdd
+
+ // ChangeDelete is the ChangeType for deletions from a container
+ ChangeDelete
+)
+
+// Change represents a change in a container.
+//
+// See https://goo.gl/9GsTIF for more details.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
new file mode 100644
index 0000000..1feb3c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
@@ -0,0 +1,872 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package docker provides a client for the Docker remote API.
+//
+// See https://goo.gl/G3plxW for more details on the remote API.
+package docker
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
+)
+
+const userAgent = "go-dockerclient"
+
+var (
+ // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
+ ErrInvalidEndpoint = errors.New("invalid endpoint")
+
+ // ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
+ ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
+
+ apiVersion112, _ = NewAPIVersion("1.12")
+
+ apiVersion119, _ = NewAPIVersion("1.19")
+)
+
+// APIVersion is an internal representation of a version of the Remote API.
+type APIVersion []int
+
+// NewAPIVersion returns an instance of APIVersion for the given string.
+//
+// The given string must be in the form <major>.<minor>, where <major>
+// and <minor> are integer numbers.
+func NewAPIVersion(input string) (APIVersion, error) {
+ if !strings.Contains(input, ".") {
+ return nil, fmt.Errorf("Unable to parse version %q", input)
+ }
+ arr := strings.Split(input, ".")
+ ret := make(APIVersion, len(arr))
+ var err error
+ for i, val := range arr {
+ ret[i], err = strconv.Atoi(val)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
+ }
+ }
+ return ret, nil
+}
+
+func (version APIVersion) String() string {
+ var str string
+ for i, val := range version {
+ str += strconv.Itoa(val)
+ if i < len(version)-1 {
+ str += "."
+ }
+ }
+ return str
+}
+
+// LessThan is a function for comparing APIVersion structs
+func (version APIVersion) LessThan(other APIVersion) bool {
+ return version.compare(other) < 0
+}
+
+// LessThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) <= 0
+}
+
+// GreaterThan is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThan(other APIVersion) bool {
+ return version.compare(other) > 0
+}
+
+// GreaterThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) >= 0
+}
+
+func (version APIVersion) compare(other APIVersion) int {
+ for i, v := range version {
+ if i <= len(other)-1 {
+ otherVersion := other[i]
+
+ if v < otherVersion {
+ return -1
+ } else if v > otherVersion {
+ return 1
+ }
+ }
+ }
+ if len(version) > len(other) {
+ return 1
+ }
+ if len(version) < len(other) {
+ return -1
+ }
+ return 0
+}
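+
+// Comparison is element-wise; for example (illustrative):
+//
+//     v112, _ := NewAPIVersion("1.12")
+//     v119, _ := NewAPIVersion("1.19")
+//     v112.LessThan(v119) // true: the majors tie at 1, then 12 < 19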
+
+// Client is the basic type of this package. It provides methods for
+// interaction with the API.
+type Client struct {
+ SkipServerVersionCheck bool
+ HTTPClient *http.Client
+ TLSConfig *tls.Config
+ Dialer *net.Dialer
+
+ endpoint string
+ endpointURL *url.URL
+ eventMonitor *eventMonitoringState
+ requestedAPIVersion APIVersion
+ serverAPIVersion APIVersion
+ expectedAPIVersion APIVersion
+ unixHTTPClient *http.Client
+}
+
+// NewClient returns a Client instance ready for communication with the given
+// server endpoint. It will use the latest remote API version available in the
+// server.
+func NewClient(endpoint string) (*Client, error) {
+ client, err := NewVersionedClient(endpoint, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates. It will use the latest remote API version
+// available in the server.
+func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
+ client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file). It will use the latest remote API version available in the server.
+func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
+ client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewVersionedClient returns a Client instance ready for communication with
+// the given server endpoint, using a specific remote API version.
+func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, false)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &Client{
+ HTTPClient: &http.Client{},
+ Dialer: &net.Dialer{},
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }, nil
+}
+
+// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
+func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
+}
+
+// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates, using a specific remote API version.
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ certPEMBlock, err := ioutil.ReadFile(cert)
+ if err != nil {
+ return nil, err
+ }
+ keyPEMBlock, err := ioutil.ReadFile(key)
+ if err != nil {
+ return nil, err
+ }
+ caPEMCert, err := ioutil.ReadFile(ca)
+ if err != nil {
+ return nil, err
+ }
+ return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
+}
+
+// NewClientFromEnv returns a Client instance ready for communication created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+func NewClientFromEnv() (*Client, error) {
+ client, err := NewVersionedClientFromEnv("")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewVersionedClientFromEnv returns a Client instance ready for communication, created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
+// and using a specific remote API version.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
+ dockerEnv, err := getDockerEnv()
+ if err != nil {
+ return nil, err
+ }
+ dockerHost := dockerEnv.dockerHost
+ if dockerEnv.dockerTLSVerify {
+ parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
+ }
+ cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
+ key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
+ ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
+ return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
+ }
+ return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
+}
+
+// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file), using a specific remote API version.
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, true)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if certPEMBlock == nil || keyPEMBlock == nil {
+ return nil, errors.New("Both cert and key are required")
+ }
+ tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
+ if caPEMCert == nil {
+ tlsConfig.InsecureSkipVerify = true
+ } else {
+ caPool := x509.NewCertPool()
+ if !caPool.AppendCertsFromPEM(caPEMCert) {
+ return nil, errors.New("Could not add RootCA pem")
+ }
+ tlsConfig.RootCAs = caPool
+ }
+ tr := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+ if err != nil {
+ return nil, err
+ }
+ return &Client{
+ HTTPClient: &http.Client{Transport: tr},
+ TLSConfig: tlsConfig,
+ Dialer: &net.Dialer{},
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }, nil
+}
+
+func (c *Client) checkAPIVersion() error {
+ serverAPIVersionString, err := c.getServerAPIVersionString()
+ if err != nil {
+ return err
+ }
+ c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
+ if err != nil {
+ return err
+ }
+ if c.requestedAPIVersion == nil {
+ c.expectedAPIVersion = c.serverAPIVersion
+ } else {
+ c.expectedAPIVersion = c.requestedAPIVersion
+ }
+ return nil
+}
+
+// Endpoint returns the current endpoint. It's useful for getting the endpoint
+// when using functions that get this data from the environment (like
+// NewClientFromEnv).
+func (c *Client) Endpoint() string {
+ return c.endpoint
+}
+
+// Ping pings the docker server
+//
+// See https://goo.gl/kQCfJj for more details.
+func (c *Client) Ping() error {
+ path := "/_ping"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return newError(resp)
+ }
+ resp.Body.Close()
+ return nil
+}
+
+func (c *Client) getServerAPIVersionString() (version string, err error) {
+ resp, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
+ }
+ var versionResponse map[string]interface{}
+ if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
+ return "", err
+ }
+ if version, ok := (versionResponse["ApiVersion"]).(string); ok {
+ return version, nil
+ }
+ return "", nil
+}
+
+type doOptions struct {
+ data interface{}
+ forceJSON bool
+ headers map[string]string
+}
+
+func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
+ var params io.Reader
+ if doOptions.data != nil || doOptions.forceJSON {
+ buf, err := json.Marshal(doOptions.data)
+ if err != nil {
+ return nil, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ }
+ httpClient := c.HTTPClient
+ protocol := c.endpointURL.Scheme
+ var u string
+ if protocol == "unix" {
+ httpClient = c.unixClient()
+ u = c.getFakeUnixURL(path)
+ } else {
+ u = c.getURL(path)
+ }
+ req, err := http.NewRequest(method, u, params)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if doOptions.data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+
+ for k, v := range doOptions.headers {
+ req.Header.Set(k, v)
+ }
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, ErrConnectionRefused
+ }
+ return nil, err
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, newError(resp)
+ }
+ return resp, nil
+}
+
+type streamOptions struct {
+ setRawTerminal bool
+ rawJSONStream bool
+ useJSONDecoder bool
+ headers map[string]string
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ // timeout is the initial connection timeout
+ timeout time.Duration
+}
+
+func (c *Client) stream(method, path string, streamOptions streamOptions) error {
+ if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+ streamOptions.in = bytes.NewReader(nil)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return err
+ }
+ }
+ req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ for key, val := range streamOptions.headers {
+ req.Header.Set(key, val)
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if streamOptions.stdout == nil {
+ streamOptions.stdout = ioutil.Discard
+ }
+ if streamOptions.stderr == nil {
+ streamOptions.stderr = ioutil.Discard
+ }
+ if protocol == "unix" {
+ dial, err := c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ defer dial.Close()
+ breader := bufio.NewReader(dial)
+ err = req.Write(dial)
+ if err != nil {
+ return err
+ }
+
+ // ReadResponse may hang if the server does not reply
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Now().Add(streamOptions.timeout))
+ }
+
+ if resp, err = http.ReadResponse(breader, req); err != nil {
+ // Cancel timeout for future I/O operations
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Time{})
+ }
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ } else {
+ if resp, err = c.HTTPClient.Do(req); err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return newError(resp)
+ }
+ if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
+ // if we want to get raw json stream, just copy it back to output
+ // without decoding it
+ if streamOptions.rawJSONStream {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ return err
+ }
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var m jsonMessage
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if m.Stream != "" {
+ fmt.Fprint(streamOptions.stdout, m.Stream)
+ } else if m.Progress != "" {
+ fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
+ } else if m.Error != "" {
+ return errors.New(m.Error)
+ }
+ if m.Status != "" {
+ fmt.Fprintln(streamOptions.stdout, m.Status)
+ }
+ }
+ } else {
+ if streamOptions.setRawTerminal {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ } else {
+ _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
+ }
+ return err
+ }
+ return nil
+}
+
+type hijackOptions struct {
+ success chan struct{}
+ setRawTerminal bool
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ data interface{}
+}
+
+func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error {
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return err
+ }
+ }
+ var params io.Reader
+ if hijackOptions.data != nil {
+ buf, err := json.Marshal(hijackOptions.data)
+ if err != nil {
+ return err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ if hijackOptions.stdout == nil {
+ hijackOptions.stdout = ioutil.Discard
+ }
+ if hijackOptions.stderr == nil {
+ hijackOptions.stderr = ioutil.Discard
+ }
+ req, err := http.NewRequest(method, c.getURL(path), params)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "plain/text")
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "tcp")
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ if c.TLSConfig != nil && protocol != "unix" {
+ dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
+ if err != nil {
+ return err
+ }
+ } else {
+ dial, err = c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
+ clientconn.Do(req)
+ if hijackOptions.success != nil {
+ hijackOptions.success <- struct{}{}
+ <-hijackOptions.success
+ }
+ rwc, br := clientconn.Hijack()
+ defer rwc.Close()
+ errChanOut := make(chan error, 1)
+ errChanIn := make(chan error, 1)
+ go func() {
+ defer func() {
+ if hijackOptions.in != nil {
+ if closer, ok := hijackOptions.in.(io.Closer); ok {
+ errChanIn <- nil
+ closer.Close()
+ }
+ }
+ }()
+ var err error
+ if hijackOptions.setRawTerminal {
+ _, err = io.Copy(hijackOptions.stdout, br)
+ } else {
+ _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
+ }
+ errChanOut <- err
+ }()
+ go func() {
+ var err error
+ if hijackOptions.in != nil {
+ _, err = io.Copy(rwc, hijackOptions.in)
+ }
+ errChanIn <- err
+ rwc.(interface {
+ CloseWrite() error
+ }).CloseWrite()
+ }()
+ errIn := <-errChanIn
+ errOut := <-errChanOut
+ if errIn != nil {
+ return errIn
+ }
+ return errOut
+}
+
+func (c *Client) getURL(path string) string {
+ urlStr := strings.TrimRight(c.endpointURL.String(), "/")
+ if c.endpointURL.Scheme == "unix" {
+ urlStr = ""
+ }
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
+// domain socket to the given path.
+func (c *Client) getFakeUnixURL(path string) string {
+ u := *c.endpointURL // Copy.
+
+ // Override URL so that net/http will not complain.
+ u.Scheme = "http"
+ u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
+ u.Path = ""
+ urlStr := strings.TrimRight(u.String(), "/")
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
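+
+// For example (illustrative): with the endpoint unix:///var/run/docker.sock
+// and path "/version", getFakeUnixURL returns "http://unix.sock/version";
+// the request still travels over the socket via the transport in unixClient().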
+
+func (c *Client) unixClient() *http.Client {
+ if c.unixHTTPClient != nil {
+ return c.unixHTTPClient
+ }
+ socketPath := c.endpointURL.Path
+ c.unixHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Dial: func(network, addr string) (net.Conn, error) {
+ return c.Dialer.Dial("unix", socketPath)
+ },
+ },
+ }
+ return c.unixHTTPClient
+}
+
+type jsonMessage struct {
+ Status string `json:"status,omitempty"`
+ Progress string `json:"progress,omitempty"`
+ Error string `json:"error,omitempty"`
+ Stream string `json:"stream,omitempty"`
+}
+
+func queryString(opts interface{}) string {
+ if opts == nil {
+ return ""
+ }
+ value := reflect.ValueOf(opts)
+ if value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ if value.Kind() != reflect.Struct {
+ return ""
+ }
+ items := url.Values(map[string][]string{})
+ for i := 0; i < value.NumField(); i++ {
+ field := value.Type().Field(i)
+ if field.PkgPath != "" {
+ continue
+ }
+ key := field.Tag.Get("qs")
+ if key == "" {
+ key = strings.ToLower(field.Name)
+ } else if key == "-" {
+ continue
+ }
+ addQueryStringValue(items, key, value.Field(i))
+ }
+ return items.Encode()
+}
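+
+// For example (illustrative): queryString(ListContainersOptions{All: true, Limit: 5})
+// yields "all=1&limit=5". Field names are lowercased (or taken from a "qs" tag),
+// booleans encode as "1", and zero values are omitted.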
+
+func addQueryStringValue(items url.Values, key string, v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ items.Add(key, "1")
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.Int() > 0 {
+ items.Add(key, strconv.FormatInt(v.Int(), 10))
+ }
+ case reflect.Float32, reflect.Float64:
+ if v.Float() > 0 {
+ items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+ }
+ case reflect.String:
+ if v.String() != "" {
+ items.Add(key, v.String())
+ }
+ case reflect.Ptr:
+ if !v.IsNil() {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Map:
+ if len(v.MapKeys()) > 0 {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Array, reflect.Slice:
+ vLen := v.Len()
+ if vLen > 0 {
+ for i := 0; i < vLen; i++ {
+ addQueryStringValue(items, key, v.Index(i))
+ }
+ }
+ }
+}
+
+// Error represents a failure returned by the API.
+type Error struct {
+ Status int
+ Message string
+}
+
+func newError(resp *http.Response) *Error {
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
+ }
+ return &Error{Status: resp.StatusCode, Message: string(data)}
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
+}
+
+func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, ErrInvalidEndpoint
+ }
+ if tls {
+ u.Scheme = "https"
+ }
+ switch u.Scheme {
+ case "unix":
+ return u, nil
+ case "http", "https", "tcp":
+ _, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ if e, ok := err.(*net.AddrError); ok {
+ if e.Err == "missing port in address" {
+ return u, nil
+ }
+ }
+ return nil, ErrInvalidEndpoint
+ }
+ number, err := strconv.ParseInt(port, 10, 64)
+ if err == nil && number > 0 && number < 65536 {
+ if u.Scheme == "tcp" {
+ if tls {
+ u.Scheme = "https"
+ } else {
+ u.Scheme = "http"
+ }
+ }
+ return u, nil
+ }
+ return nil, ErrInvalidEndpoint
+ default:
+ return nil, ErrInvalidEndpoint
+ }
+}
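+
+// For example (illustrative): parseEndpoint("tcp://localhost:2375", false)
+// yields an "http" URL, the same endpoint with tls set yields "https", and
+// "unix:///var/run/docker.sock" is returned unchanged.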
+
+type dockerEnv struct {
+ dockerHost string
+ dockerTLSVerify bool
+ dockerCertPath string
+}
+
+func getDockerEnv() (*dockerEnv, error) {
+ dockerHost := os.Getenv("DOCKER_HOST")
+ var err error
+ if dockerHost == "" {
+ dockerHost, err = DefaultDockerHost()
+ if err != nil {
+ return nil, err
+ }
+ }
+ dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
+ var dockerCertPath string
+ if dockerTLSVerify {
+ dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
+ if dockerCertPath == "" {
+ home := homedir.Get()
+ if home == "" {
+ return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
+ }
+ dockerCertPath = filepath.Join(home, ".docker")
+ dockerCertPath, err = filepath.Abs(dockerCertPath)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &dockerEnv{
+ dockerHost: dockerHost,
+ dockerTLSVerify: dockerTLSVerify,
+ dockerCertPath: dockerCertPath,
+ }, nil
+}
+
+// DefaultDockerHost returns the default Docker host endpoint for the current OS.
+func DefaultDockerHost() (string, error) {
+ var defaultHost string
+ if runtime.GOOS == "windows" {
+ // If we do not have a host, default to TCP socket on Windows
+ defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
+ } else {
+ // If we do not have a host, default to unix socket
+ defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
+ }
+ return opts.ValidateHost(defaultHost)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
new file mode 100644
index 0000000..faf1263
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
@@ -0,0 +1,1141 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// ErrContainerAlreadyExists is the error returned by CreateContainer when the
+// container already exists.
+var ErrContainerAlreadyExists = errors.New("container already exists")
+
+// ListContainersOptions specify parameters to the ListContainers function.
+//
+// See https://goo.gl/47a6tO for more details.
+type ListContainersOptions struct {
+ All bool
+ Size bool
+ Limit int
+ Since string
+ Before string
+ Filters map[string][]string
+}
+
+// APIPort is a type that represents a port mapping returned by the Docker API
+type APIPort struct {
+ PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"`
+ PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"`
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
+}
+
+// APIContainers represents each container in the list returned by
+// ListContainers.
+type APIContainers struct {
+ ID string `json:"Id" yaml:"Id"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Command string `json:"Command,omitempty" yaml:"Command,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+ SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"`
+ Names []string `json:"Names,omitempty" yaml:"Names,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See https://goo.gl/47a6tO for more details.
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+ path := "/containers/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var containers []APIContainers
+ if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
+type Port string
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+ return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+ parts := strings.Split(string(p), "/")
+ if len(parts) == 1 {
+ return "tcp"
+ }
+ return parts[1]
+}
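+
+// For example (illustrative only):
+//
+//	Port("80/tcp").Port()  // "80"
+//	Port("80/tcp").Proto() // "tcp"
+//	Port("53").Proto()     // "tcp" (the protocol defaults to tcp)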
+
+// State represents the state of a container.
+type State struct {
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"`
+ Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"`
+ OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"`
+ Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ Error string `json:"Error,omitempty" yaml:"Error,omitempty"`
+ StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"`
+ FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"`
+}
+
+// String returns the string representation of a state.
+func (s *State) String() string {
+ if s.Running {
+ if s.Paused {
+ return "paused"
+ }
+ return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt))
+ }
+ return fmt.Sprintf("Exit %d", s.ExitCode)
+}
+
+// PortBinding represents the host/container port mapping as returned in the
+// `docker inspect` json
+type PortBinding struct {
+ HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"`
+ HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"`
+}
+
+// PortMapping represents a deprecated field in the `docker inspect` output,
+// and its value as found in NetworkSettings should always be nil
+type PortMapping map[string]string
+
+// NetworkSettings contains network-related information about a container
+type NetworkSettings struct {
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
+ Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
+ PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
+ Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
+ SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
+ LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"`
+ LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"`
+ SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"`
+ SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"`
+}
+
+// PortMappingAPI translates the port mappings as contained in NetworkSettings
+// into the format in which they would appear when returned by the API
+func (settings *NetworkSettings) PortMappingAPI() []APIPort {
+ var mapping []APIPort
+ for port, bindings := range settings.Ports {
+ p, _ := parsePort(port.Port())
+ if len(bindings) == 0 {
+ mapping = append(mapping, APIPort{
+ PublicPort: int64(p),
+ Type: port.Proto(),
+ })
+ continue
+ }
+ for _, binding := range bindings {
+ p, _ := parsePort(port.Port())
+ h, _ := parsePort(binding.HostPort)
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ PublicPort: int64(h),
+ Type: port.Proto(),
+ IP: binding.HostIP,
+ })
+ }
+ }
+ return mapping
+}
+
+func parsePort(rawPort string) (int, error) {
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// Config is the list of configuration options used when creating a container.
+// Config does not contain the options that are specific to starting a container on a
+// given host. Those are contained in HostConfig.
+type Config struct {
+ Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
+ ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
+ Cmd []string `json:"Cmd" yaml:"Cmd"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
+ VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Mount represents a mount point in the container.
+//
+// It has been added in the version 1.20 of the Docker API, available since
+// Docker 1.8.
+type Mount struct {
+ Source string
+ Destination string
+ Mode string
+ RW bool
+}
+
+// LogConfig defines the log driver type and the configuration for it.
+type LogConfig struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"`
+}
+
+// ULimit defines system-wide resource limitations. This can help a lot in
+// system administration, e.g. when a user starts too many processes and
+// therefore makes the system unresponsive for other users.
+type ULimit struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"`
+ Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"`
+}
+
+// SwarmNode contains information about which Swarm node the container is on.
+type SwarmNode struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
+ Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Container is the type encompassing everything about a container - its
+// config, hostconfig, etc.
+type Container struct {
+ ID string `json:"Id" yaml:"Id"`
+
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+
+ Path string `json:"Path,omitempty" yaml:"Path,omitempty"`
+ Args []string `json:"Args,omitempty" yaml:"Args,omitempty"`
+
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
+ State State `json:"State,omitempty" yaml:"State,omitempty"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+
+ Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"`
+
+ NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
+
+ SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"`
+ ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"`
+ HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"`
+ HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"`
+ LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+
+ Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
+
+ RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"`
+
+ AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"`
+}
+
+// RenameContainerOptions specify parameters to the RenameContainer function.
+//
+// See https://goo.gl/laSOIy for more details.
+type RenameContainerOptions struct {
+ // ID of container to rename
+ ID string `qs:"-"`
+
+ // New name
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+}
+
+// RenameContainer updates the name of an existing container.
+//
+// See https://goo.gl/laSOIy for more details.
+func (c *Client) RenameContainer(opts RenameContainerOptions) error {
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectContainer returns information about a container by its ID.
+//
+// See https://goo.gl/RdIq0b for more details.
+func (c *Client) InspectContainer(id string) (*Container, error) {
+ path := "/containers/" + id + "/json"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+ return &container, nil
+}
+
+// ContainerChanges returns changes in the filesystem of the given container.
+//
+// See https://goo.gl/9GsTIF for more details.
+func (c *Client) ContainerChanges(id string) ([]Change, error) {
+ path := "/containers/" + id + "/changes"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var changes []Change
+ if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// CreateContainerOptions specify parameters to the CreateContainer function.
+//
+// See https://goo.gl/WxQzrr for more details.
+type CreateContainerOptions struct {
+ Name string
+ Config *Config `qs:"-"`
+ HostConfig *HostConfig `qs:"-"`
+}
+
+// CreateContainer creates a new container, returning the container instance,
+// or an error in case of failure.
+//
+// See https://goo.gl/WxQzrr for more details.
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
+ path := "/containers/create?" + queryString(opts)
+ resp, err := c.do(
+ "POST",
+ path,
+ doOptions{
+ data: struct {
+ *Config
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ }{
+ opts.Config,
+ opts.HostConfig,
+ },
+ },
+ )
+
+ if e, ok := err.(*Error); ok {
+ if e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if e.Status == http.StatusConflict {
+ return nil, ErrContainerAlreadyExists
+ }
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+
+ container.Name = opts.Name
+
+ return &container, nil
+}
+
+// KeyValuePair is a type for generic key/value pairs as used in the Lxc
+// configuration
+type KeyValuePair struct {
+ Key string `json:"Key,omitempty" yaml:"Key,omitempty"`
+ Value string `json:"Value,omitempty" yaml:"Value,omitempty"`
+}
+
+// RestartPolicy represents the policy for automatically restarting a container.
+//
+// Possible values are:
+//
+// - always: the docker daemon will always restart the container
+// - on-failure: the docker daemon will restart the container on failures, at
+// most MaximumRetryCount times
+// - no: the docker daemon will not restart the container automatically
+type RestartPolicy struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"`
+}
+
+// AlwaysRestart returns a restart policy that tells the Docker daemon to
+// always restart the container.
+func AlwaysRestart() RestartPolicy {
+ return RestartPolicy{Name: "always"}
+}
+
+// RestartOnFailure returns a restart policy that tells the Docker daemon to
+// restart the container on failures, trying at most maxRetry times.
+func RestartOnFailure(maxRetry int) RestartPolicy {
+ return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
+}
+
+// NeverRestart returns a restart policy that tells the Docker daemon to never
+// restart the container on failures.
+func NeverRestart() RestartPolicy {
+ return RestartPolicy{Name: "no"}
+}
+
+// Device represents a device mapping between the Docker host and the
+// container.
+type Device struct {
+ PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"`
+ PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"`
+ CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"`
+}
+
+// HostConfig contains the container options related to starting a container on
+// a given host
+type HostConfig struct {
+ Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"`
+ CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"`
+ CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"`
+ GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"`
+ ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
+ LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
+ PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"`
+ Links []string `json:"Links,omitempty" yaml:"Links,omitempty"`
+ PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only
+ DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"`
+ ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
+ VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
+ IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
+ PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"`
+ UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
+ Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"`
+ LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
+ ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
+ SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
+ CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"`
+ OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"`
+ CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"`
+ CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"`
+ CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"`
+ BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"`
+ Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
+}
+
+// StartContainer starts a container, returning an error in case of failure.
+//
+// See https://goo.gl/MrBAJv for more details.
+func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
+ path := "/containers/" + id + "/start"
+ resp, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id, Err: err}
+ }
+ return err
+ }
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerAlreadyRunning{ID: id}
+ }
+ resp.Body.Close()
+ return nil
+}
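+
+// A minimal usage sketch (illustrative only, assuming a daemon reachable at
+// the default Unix socket and a locally available "busybox" image; NewClient
+// lives in client.go of this package):
+//
+//	client, err := docker.NewClient("unix:///var/run/docker.sock")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	container, err := client.CreateContainer(docker.CreateContainerOptions{
+//		Name:   "example",
+//		Config: &docker.Config{Image: "busybox", Cmd: []string{"sleep", "60"}},
+//		HostConfig: &docker.HostConfig{
+//			RestartPolicy: docker.RestartOnFailure(3),
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := client.StartContainer(container.ID, nil); err != nil {
+//		log.Fatal(err)
+//	}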
+
+// StopContainer stops a container, killing it after the given timeout (in
+// seconds).
+//
+// See https://goo.gl/USqsFt for more details.
+func (c *Client) StopContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerNotRunning{ID: id}
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// RestartContainer stops a container, killing it after the given timeout (in
+// seconds) during the stop process, and then starts it again.
+//
+// See https://goo.gl/QzsDnz for more details.
+func (c *Client) RestartContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// PauseContainer pauses the given container.
+//
+// See https://goo.gl/OF7W9X for more details.
+func (c *Client) PauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/pause", id)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UnpauseContainer unpauses the given container.
+//
+// See https://goo.gl/7dwyPA for more details.
+func (c *Client) UnpauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/unpause", id)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// TopResult represents the list of processes running in a container, as
+// returned by /containers/<id>/top.
+//
+// See https://goo.gl/Rb46aY for more details.
+type TopResult struct {
+ Titles []string
+ Processes [][]string
+}
+
+// TopContainer returns processes running inside a container
+//
+// See https://goo.gl/Rb46aY for more details.
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
+ var args string
+ var result TopResult
+ if psArgs != "" {
+ args = fmt.Sprintf("?ps_args=%s", psArgs)
+ }
+ path := fmt.Sprintf("/containers/%s/top%s", id, args)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return result, &NoSuchContainer{ID: id}
+ }
+ return result, err
+ }
+ defer resp.Body.Close()
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return result, err
+ }
+ return result, nil
+}
+
+// Stats represents container statistics, returned by /containers/<id>/stats.
+//
+// See https://goo.gl/GNmLHb for more details.
+type Stats struct {
+ Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
+ Network struct {
+ RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"`
+ RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"`
+ RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"`
+ TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"`
+ TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"`
+ RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"`
+ TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"`
+ TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"`
+ } `json:"network,omitempty" yaml:"network,omitempty"`
+ MemoryStats struct {
+ Stats struct {
+ TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"`
+ Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"`
+ MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"`
+ TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"`
+ Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"`
+ Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"`
+ TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"`
+ Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"`
+ Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"`
+ Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"`
+ TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"`
+ Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"`
+ TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"`
+ TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"`
+ TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"`
+ TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"`
+ RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"`
+ HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"`
+ TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"`
+ TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"`
+ ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"`
+ TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"`
+ TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"`
+ TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"`
+ InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"`
+ ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"`
+ Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"`
+ InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"`
+ TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"`
+ } `json:"stats,omitempty" yaml:"stats,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"`
+ Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"`
+ Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"`
+ } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"`
+ BlkioStats struct {
+ IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"`
+ IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"`
+ IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"`
+ IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"`
+ IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"`
+ IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"`
+ IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"`
+ } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"`
+ CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
+}
+
+// CPUStats is a stats entry for cpu stats
+type CPUStats struct {
+ CPUUsage struct {
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"`
+ UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"`
+ TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"`
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"`
+ } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"`
+ SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"`
+ ThrottlingData struct {
+ Periods uint64 `json:"periods,omitempty"`
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+ } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"`
+}
+
+// BlkioStatsEntry is a stats entry for blkio_stats
+type BlkioStatsEntry struct {
+ Major uint64 `json:"major,omitempty" yaml:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"`
+ Op string `json:"op,omitempty" yaml:"op,omitempty"`
+ Value uint64 `json:"value,omitempty" yaml:"value,omitempty"`
+}
+
+// StatsOptions specify parameters to the Stats function.
+//
+// See https://goo.gl/GNmLHb for more details.
+type StatsOptions struct {
+ ID string
+ Stats chan<- *Stats
+ Stream bool
+ // A flag that enables stopping the stats operation
+ Done <-chan bool
+ // Initial connection timeout
+ Timeout time.Duration
+}
+
+// Stats sends statistics for the given container to the given channel.
+//
+// This function is blocking, similar to a streaming call for logs, and should
+// be run on a separate goroutine from the caller. Note that this function will
+// block until the given container is removed, not just exited. When finished,
+// it will close the given channel. Alternatively, the function can be stopped
+// by signaling on the Done channel.
+//
+// See https://goo.gl/GNmLHb for more details.
+func (c *Client) Stats(opts StatsOptions) (retErr error) {
+ errC := make(chan error, 1)
+ readCloser, writeCloser := io.Pipe()
+
+ defer func() {
+ close(opts.Stats)
+
+ select {
+ case err := <-errC:
+ if err != nil && retErr == nil {
+ retErr = err
+ }
+ default:
+ // No errors
+ }
+
+ if err := readCloser.Close(); err != nil && retErr == nil {
+ retErr = err
+ }
+ }()
+
+ go func() {
+ err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+ rawJSONStream: true,
+ useJSONDecoder: true,
+ stdout: writeCloser,
+ timeout: opts.Timeout,
+ })
+ if err != nil {
+ dockerError, ok := err.(*Error)
+ if ok {
+ if dockerError.Status == http.StatusNotFound {
+ err = &NoSuchContainer{ID: opts.ID}
+ }
+ }
+ }
+ if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ errC <- err
+ close(errC)
+ }()
+
+ quit := make(chan struct{})
+ defer close(quit)
+ go func() {
+ // block here waiting for the signal to stop function
+ select {
+ case <-opts.Done:
+ readCloser.Close()
+ case <-quit:
+ return
+ }
+ }()
+
+ decoder := json.NewDecoder(readCloser)
+ stats := new(Stats)
+ for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
+ if err != nil {
+ return err
+ }
+ opts.Stats <- stats
+ stats = new(Stats)
+ }
+ return nil
+}
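+
+// A minimal usage sketch (illustrative only): because Stats blocks, run it on
+// its own goroutine and drain the channel from the caller; signaling on Done
+// stops the stream early.
+//
+//	statsC := make(chan *docker.Stats)
+//	done := make(chan bool)
+//	go func() {
+//		if err := client.Stats(docker.StatsOptions{
+//			ID: container.ID, Stats: statsC, Stream: true, Done: done,
+//		}); err != nil {
+//			log.Println(err)
+//		}
+//	}()
+//	for s := range statsC {
+//		fmt.Println(s.MemoryStats.Usage)
+//	}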
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+//
+// See https://goo.gl/hkS9i8 for more details.
+type KillContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // The signal to send to the container. When omitted, Docker server
+ // will assume SIGKILL.
+ Signal Signal
+}
+
+// KillContainer sends a signal to a container, returning an error in case of
+// failure.
+//
+// See https://goo.gl/hkS9i8 for more details.
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+ path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
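+
+// For example (illustrative; the Signal constants such as SIGTERM are assumed
+// to be the ones defined elsewhere in this package):
+//
+//	err := client.KillContainer(docker.KillContainerOptions{
+//		ID:     container.ID,
+//		Signal: docker.SIGTERM,
+//	})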
+
+// RemoveContainerOptions encapsulates options to remove a container.
+//
+// See https://goo.gl/RQyX62 for more details.
+type RemoveContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // A flag that indicates whether Docker should remove the volumes
+ // associated to the container.
+ RemoveVolumes bool `qs:"v"`
+
+ // A flag that indicates whether Docker should remove the container
+ // even if it is currently running.
+ Force bool
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See https://goo.gl/RQyX62 for more details.
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+ path := "/containers/" + opts.ID + "?" + queryString(opts)
+ resp, err := c.do("DELETE", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UploadToContainerOptions is the set of options that can be used when
+// uploading an archive into a container.
+//
+// See https://goo.gl/Ss97HW for more details.
+type UploadToContainerOptions struct {
+ InputStream io.Reader `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"`
+}
+
+// UploadToContainer uploads a tar archive to be extracted to a path in the
+// filesystem of the container.
+//
+// See https://goo.gl/Ss97HW for more details.
+func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream("PUT", url, streamOptions{
+ in: opts.InputStream,
+ })
+}
+
+// DownloadFromContainerOptions is the set of options that can be used when
+// downloading resources from a container.
+//
+// See https://goo.gl/KnZJDX for more details.
+type DownloadFromContainerOptions struct {
+ OutputStream io.Writer `json:"-" qs:"-"`
+ Path string `qs:"path"`
+}
+
+// DownloadFromContainer downloads a tar archive of files or folders in a container.
+//
+// See https://goo.gl/KnZJDX for more details.
+func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream("GET", url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ })
+}
+
+// CopyFromContainerOptions has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer.
+//
+// See https://goo.gl/R2jevW for more details.
+type CopyFromContainerOptions struct {
+ OutputStream io.Writer `json:"-"`
+ Container string `json:"-"`
+ Resource string
+}
+
+// CopyFromContainer has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer.
+//
+// See https://goo.gl/R2jevW for more details.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+ resp, err := c.do("POST", url, doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ _, err = io.Copy(opts.OutputStream, resp.Body)
+ return err
+}
+
+// WaitContainer blocks until the given container stops, returning its exit
+// code.
+//
+// See https://goo.gl/Gc1rge for more details.
+func (c *Client) WaitContainer(id string) (int, error) {
+ resp, err := c.do("POST", "/containers/"+id+"/wait", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return 0, &NoSuchContainer{ID: id}
+ }
+ return 0, err
+ }
+ defer resp.Body.Close()
+ var r struct{ StatusCode int }
+ if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return 0, err
+ }
+ return r.StatusCode, nil
+}
+
+// CommitContainerOptions aggregates parameters to the CommitContainer method.
+//
+// See https://goo.gl/mqfoCw for more details.
+type CommitContainerOptions struct {
+ Container string
+ Repository string `qs:"repo"`
+ Tag string
+ Message string `qs:"m"`
+ Author string
+ Run *Config `qs:"-"`
+}
+
+// CommitContainer creates a new image from a container's changes.
+//
+// See https://goo.gl/mqfoCw for more details.
+func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
+ path := "/commit?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{data: opts.Run})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var image Image
+ if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+ return nil, err
+ }
+ return &image, nil
+}
+
+// AttachToContainerOptions is the set of options that can be used when
+// attaching to a container.
+//
+// See https://goo.gl/NKpkFk for more details.
+type AttachToContainerOptions struct {
+ Container string `qs:"-"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // Get container logs, sending them to OutputStream.
+ Logs bool
+
+ // Stream the response?
+ Stream bool
+
+ // Attach to stdin, and use InputStream.
+ Stdin bool
+
+ // Attach to stdout, and use OutputStream.
+ Stdout bool
+
+ // Attach to stderr, and use ErrorStream.
+ Stderr bool
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{}
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
+
+// AttachToContainer attaches to a container, using the given options.
+//
+// See https://goo.gl/NKpkFk for more details.
+func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ })
+}
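+
+// A minimal usage sketch (illustrative only), streaming the container's
+// output to the local stdout and using the unbuffered Success channel as the
+// handshake described above:
+//
+//	success := make(chan struct{})
+//	go func() {
+//		err := client.AttachToContainer(docker.AttachToContainerOptions{
+//			Container:    container.ID,
+//			OutputStream: os.Stdout,
+//			Stream:       true,
+//			Stdout:       true,
+//			Success:      success,
+//		})
+//		if err != nil {
+//			log.Println(err)
+//		}
+//	}()
+//	<-success             // wait for the sentinel sent on connect...
+//	success <- struct{}{} // ...then unblock the client to start streaming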
+
+// LogsOptions represents the set of options used when getting logs from a
+// container.
+//
+// See https://goo.gl/yl8PGm for more details.
+type LogsOptions struct {
+ Container string `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+ Follow bool
+ Stdout bool
+ Stderr bool
+ Since int64
+ Timestamps bool
+ Tail string
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
+
+// Logs gets stdout and stderr logs from the specified container.
+//
+// See https://goo.gl/yl8PGm for more details.
+func (c *Client) Logs(opts LogsOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if opts.Tail == "" {
+ opts.Tail = "all"
+ }
+ path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
+ return c.stream("GET", path, streamOptions{
+ setRawTerminal: opts.RawTerminal,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ })
+}
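+
+// For example (illustrative only), capturing the last ten stdout log lines
+// into a buffer:
+//
+//	var buf bytes.Buffer
+//	err := client.Logs(docker.LogsOptions{
+//		Container:    container.ID,
+//		OutputStream: &buf,
+//		Stdout:       true,
+//		Tail:         "10",
+//	})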
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+//
+// See https://goo.gl/xERhCc for more details.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+ resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See https://goo.gl/dOkTyk for more details.
+type ExportContainerOptions struct {
+ ID string
+ OutputStream io.Writer
+}
+
+// ExportContainer exports the contents of the given container as a tar
+// archive, writing it to the provided output stream.
+//
+// See https://goo.gl/dOkTyk for more details.
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+ if opts.ID == "" {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ url := fmt.Sprintf("/containers/%s/export", opts.ID)
+ return c.stream("GET", url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ })
+}
+
+// NoSuchContainer is the error returned when a given container does not exist.
+type NoSuchContainer struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchContainer) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such container: " + err.ID
+}
+
+// ContainerAlreadyRunning is the error returned when a given container is
+// already running.
+type ContainerAlreadyRunning struct {
+ ID string
+}
+
+func (err *ContainerAlreadyRunning) Error() string {
+ return "Container already running: " + err.ID
+}
+
+// ContainerNotRunning is the error returned when a given container is not
+// running.
+type ContainerNotRunning struct {
+ ID string
+}
+
+func (err *ContainerNotRunning) Error() string {
+ return "Container not running: " + err.ID
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go
new file mode 100644
index 0000000..c54b0b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go
@@ -0,0 +1,168 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Env represents a list of key-value pairs, each in the form KEY=VALUE.
+type Env []string
+
+// Get returns the string value of the given key.
+func (env *Env) Get(key string) (value string) {
+ return env.Map()[key]
+}
+
+// Exists checks whether the given key is defined in the internal Env
+// representation.
+func (env *Env) Exists(key string) bool {
+ _, exists := env.Map()[key]
+ return exists
+}
+
+// GetBool returns a boolean representation of the given key. The key is false
+// whenever its value is 0, no, false, none or an empty string. Any other value
+// will be interpreted as true.
+func (env *Env) GetBool(key string) (value bool) {
+ s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+ if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+ return false
+ }
+ return true
+}
+
+// SetBool sets a boolean value for the given key.
+func (env *Env) SetBool(key string, value bool) {
+ if value {
+ env.Set(key, "1")
+ } else {
+ env.Set(key, "0")
+ }
+}
+
+// GetInt returns the value of the provided key, converted to int.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt(key string) int {
+ return int(env.GetInt64(key))
+}
+
+// SetInt sets an integer value for the given key.
+func (env *Env) SetInt(key string, value int) {
+ env.Set(key, strconv.Itoa(value))
+}
+
+// GetInt64 returns the value of the provided key, converted to int64.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt64(key string) int64 {
+ s := strings.Trim(env.Get(key), " \t")
+ val, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return -1
+ }
+ return val
+}
+
+// SetInt64 sets an integer (64-bit wide) value for the given key.
+func (env *Env) SetInt64(key string, value int64) {
+ env.Set(key, strconv.FormatInt(value, 10))
+}
+
+// GetJSON unmarshals the value of the provided key into the provided iface.
+//
+// iface is a value that can be provided to the json.Unmarshal function.
+func (env *Env) GetJSON(key string, iface interface{}) error {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ return json.Unmarshal([]byte(sval), iface)
+}
+
+// SetJSON marshals the given value to JSON format and stores it using the
+// provided key.
+func (env *Env) SetJSON(key string, value interface{}) error {
+ sval, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ env.Set(key, string(sval))
+ return nil
+}
+
+// GetList returns a list of strings matching the provided key. It handles the
+// list as a JSON representation of a list of strings.
+//
+// If the given key matches a single string, it will return a list
+// containing only the value that matches the key.
+func (env *Env) GetList(key string) []string {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ var l []string
+ if err := json.Unmarshal([]byte(sval), &l); err != nil {
+ l = append(l, sval)
+ }
+ return l
+}
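+
+// For example (illustrative only), a JSON-encoded list round-trips, while a
+// plain string falls back to a single-element list:
+//
+//	var env docker.Env
+//	env.SetList("names", []string{"a", "b"})
+//	env.GetList("names") // []string{"a", "b"}
+//	env.Set("single", "value")
+//	env.GetList("single") // []string{"value"}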
+
+// SetList stores the given list in the provided key, after serializing it to
+// JSON format.
+func (env *Env) SetList(key string, value []string) error {
+ return env.SetJSON(key, value)
+}
+
+// Set sets the value of the given key to the given string.
+func (env *Env) Set(key, value string) {
+ *env = append(*env, key+"="+value)
+}
+
+// Decode decodes `src` as a json dictionary, and adds each decoded key-value
+// pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error is returned.
+func (env *Env) Decode(src io.Reader) error {
+ m := make(map[string]interface{})
+ if err := json.NewDecoder(src).Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ env.SetAuto(k, v)
+ }
+ return nil
+}
+
+// SetAuto chooses which Set* method to call based on the type of the given value.
+func (env *Env) SetAuto(key string, value interface{}) {
+ if fval, ok := value.(float64); ok {
+ env.SetInt64(key, int64(fval))
+ } else if sval, ok := value.(string); ok {
+ env.Set(key, sval)
+ } else if val, err := json.Marshal(value); err == nil {
+ env.Set(key, string(val))
+ } else {
+ env.Set(key, fmt.Sprintf("%v", value))
+ }
+}
+
+// Map returns the map representation of the env.
+func (env *Env) Map() map[string]string {
+ if len(*env) == 0 {
+ return nil
+ }
+ m := make(map[string]string)
+ for _, kv := range *env {
+ parts := strings.SplitN(kv, "=", 2)
+ m[parts[0]] = parts[1]
+ }
+ return m
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
new file mode 100644
index 0000000..eaffddb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
@@ -0,0 +1,304 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// APIEvents represents an event returned by the API.
+type APIEvents struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ From string `json:"From,omitempty" yaml:"From,omitempty"`
+ Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"`
+}
+
+type eventMonitoringState struct {
+ sync.RWMutex
+ sync.WaitGroup
+ enabled bool
+ lastSeen *int64
+ C chan *APIEvents
+ errC chan error
+ listeners []chan<- *APIEvents
+}
+
+const (
+ maxMonitorConnRetries = 5
+ retryInitialWaitTime = 10.
+)
+
+var (
+ // ErrNoListeners is the error returned when no listeners are available
+ // to receive an event.
+ ErrNoListeners = errors.New("no listeners present to receive event")
+
+ // ErrListenerAlreadyExists is the error returned when the listener already
+ // exists.
+ ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+
+ // EOFEvent is sent when the event listener receives an EOF error.
+ EOFEvent = &APIEvents{
+ Status: "EOF",
+ }
+)
+
+// AddEventListener adds a new listener to container events in the Docker API.
+//
+// The parameter is a channel through which events will be sent.
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
+ var err error
+ if !c.eventMonitor.isEnabled() {
+ err = c.eventMonitor.enableEventMonitoring(c)
+ if err != nil {
+ return err
+ }
+ }
+ err = c.eventMonitor.addListener(listener)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RemoveEventListener removes a listener from the monitor.
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
+ err := c.eventMonitor.removeListener(listener)
+ if err != nil {
+ return err
+ }
+ if len(c.eventMonitor.listeners) == 0 {
+ c.eventMonitor.disableEventMonitoring()
+ }
+ return nil
+}
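+
+// A minimal usage sketch (illustrative only): events arrive on the registered
+// channel until the listener is removed (or monitoring shuts down and the
+// channel is closed).
+//
+//	events := make(chan *docker.APIEvents)
+//	if err := client.AddEventListener(events); err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.RemoveEventListener(events)
+//	for ev := range events {
+//		fmt.Printf("%s: %s\n", ev.Status, ev.ID)
+//	}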
+
+func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ return ErrListenerAlreadyExists
+ }
+ eventState.Add(1)
+ eventState.listeners = append(eventState.listeners, listener)
+ return nil
+}
+
+func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ var newListeners []chan<- *APIEvents
+ for _, l := range eventState.listeners {
+ if l != listener {
+ newListeners = append(newListeners, l)
+ }
+ }
+ eventState.listeners = newListeners
+ eventState.Add(-1)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) closeListeners() {
+ for _, l := range eventState.listeners {
+ close(l)
+ eventState.Add(-1)
+ }
+ eventState.listeners = nil
+}
+
+func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
+ for _, b := range *list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if !eventState.enabled {
+ eventState.enabled = true
+ var lastSeenDefault = int64(0)
+ eventState.lastSeen = &lastSeenDefault
+ eventState.C = make(chan *APIEvents, 100)
+ eventState.errC = make(chan error, 1)
+ go eventState.monitorEvents(c)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) disableEventMonitoring() error {
+ eventState.Lock()
+ defer eventState.Unlock()
+
+ eventState.closeListeners()
+
+ eventState.Wait()
+
+ if eventState.enabled {
+ eventState.enabled = false
+ close(eventState.C)
+ close(eventState.errC)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) monitorEvents(c *Client) {
+ var err error
+ for eventState.noListeners() {
+ time.Sleep(10 * time.Millisecond)
+ }
+ if err = eventState.connectWithRetry(c); err != nil {
+ // terminate if connect failed
+ eventState.disableEventMonitoring()
+ return
+ }
+ for eventState.isEnabled() {
+ timeout := time.After(100 * time.Millisecond)
+ select {
+ case ev, ok := <-eventState.C:
+ if !ok {
+ return
+ }
+ if ev == EOFEvent {
+ eventState.disableEventMonitoring()
+ return
+ }
+ eventState.updateLastSeen(ev)
+ go eventState.sendEvent(ev)
+ case err = <-eventState.errC:
+ if err == ErrNoListeners {
+ eventState.disableEventMonitoring()
+ return
+ } else if err != nil {
+ defer func() { go eventState.monitorEvents(c) }()
+ return
+ }
+ case <-timeout:
+ continue
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
+ var retries int
+ var err error
+ for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
+ waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
+ time.Sleep(time.Duration(waitTime) * time.Millisecond)
+ err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
+ }
+ return err
+}
+
+func (eventState *eventMonitoringState) noListeners() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners) == 0
+}
+
+func (eventState *eventMonitoringState) isEnabled() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return eventState.enabled
+}
+
+func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ eventState.Add(1)
+ defer eventState.Done()
+ if eventState.enabled {
+ if len(eventState.listeners) == 0 {
+ eventState.errC <- ErrNoListeners
+ return
+ }
+
+ for _, listener := range eventState.listeners {
+ listener <- event
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if atomic.LoadInt64(eventState.lastSeen) < e.Time {
+ atomic.StoreInt64(eventState.lastSeen, e.Time)
+ }
+}
+
+func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
+ uri := "/events"
+ if startTime != 0 {
+ uri += fmt.Sprintf("?since=%d", startTime)
+ }
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ var err error
+ if c.TLSConfig == nil {
+ dial, err = c.Dialer.Dial(protocol, address)
+ } else {
+ dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
+ }
+ if err != nil {
+ return err
+ }
+ conn := httputil.NewClientConn(dial, nil)
+ req, err := http.NewRequest("GET", uri, nil)
+ if err != nil {
+ return err
+ }
+ res, err := conn.Do(req)
+ if err != nil {
+ return err
+ }
+ go func(res *http.Response, conn *httputil.ClientConn) {
+ defer conn.Close()
+ defer res.Body.Close()
+ decoder := json.NewDecoder(res.Body)
+ for {
+ var event APIEvents
+ if err = decoder.Decode(&event); err != nil {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ if c.eventMonitor.isEnabled() {
+ // Signal that we're exiting.
+ eventChan <- EOFEvent
+ }
+ break
+ }
+ errChan <- err
+ }
+ if event.Time == 0 {
+ continue
+ }
+ if !c.eventMonitor.isEnabled() {
+ return
+ }
+ eventChan <- &event
+ }
+ }(res, conn)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
new file mode 100644
index 0000000..f3b705f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
@@ -0,0 +1,186 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// Exec is the type representing a `docker exec` instance, containing the
+// exec instance ID.
+type Exec struct {
+ ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
+}
+
+// CreateExecOptions specify parameters to the CreateExec function.
+//
+// See https://goo.gl/1KSIb7 for more details
+type CreateExecOptions struct {
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+}
+
+// CreateExec sets up an exec instance in a running container `id`, returning the exec
+// instance, or an error in case of failure.
+//
+// See https://goo.gl/1KSIb7 for more details
+func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
+ path := fmt.Sprintf("/containers/%s/exec", opts.Container)
+ resp, err := c.do("POST", path, doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var exec Exec
+ if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+ return nil, err
+ }
+
+ return &exec, nil
+}
+
+// StartExecOptions specify parameters to the StartExec function.
+//
+// See https://goo.gl/iQCnto for more details
+type StartExecOptions struct {
+ Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
+
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{} `json:"-"`
+}
+
+// StartExec starts a previously set up exec instance id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/iQCnto for more details
+func (c *Client) StartExec(id string, opts StartExecOptions) error {
+ if id == "" {
+ return &NoSuchExec{ID: id}
+ }
+
+ path := fmt.Sprintf("/exec/%s/start", id)
+
+ if opts.Detach {
+ resp, err := c.do("POST", path, doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchExec{ID: id}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+ }
+
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ data: opts,
+ })
+}
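+
+// A typical flow creates an exec instance and then starts it attached to an
+// output stream. The snippet below is an illustrative sketch only; the
+// container ID and command are hypothetical:
+//
+//  exec, err := client.CreateExec(docker.CreateExecOptions{
+//      Container:    "my-container", // hypothetical container ID
+//      Cmd:          []string{"ls", "-l"},
+//      AttachStdout: true,
+//  })
+//  if err != nil {
+//      // handle the error
+//  }
+//  err = client.StartExec(exec.ID, docker.StartExecOptions{
+//      OutputStream: os.Stdout,
+//  })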
+
+// ResizeExecTTY resizes the tty session used by the exec command id. This API
+// is valid only if Tty was specified as part of creating and starting the exec
+// command.
+//
+// See https://goo.gl/e1JpsA for more details
+func (c *Client) ResizeExecTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+
+ path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ExecProcessConfig is a type describing the command associated with an Exec
+// instance. It's used in the ExecInspect type.
+type ExecProcessConfig struct {
+ Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
+ User string `json:"user,omitempty" yaml:"user,omitempty"`
+ Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
+ EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
+ Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about an exec instance, including the
+// exit code if the command has finished running. It's returned by an API
+// call to /exec/(id)/json.
+//
+// See https://goo.gl/gPtX9R for more details
+type ExecInspect struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
+ OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
+ ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
+ Container Container `json:"Container,omitempty" yaml:"Container,omitempty"`
+}
+
+// InspectExec returns low-level information about the exec command id.
+//
+// See https://goo.gl/gPtX9R for more details
+func (c *Client) InspectExec(id string) (*ExecInspect, error) {
+ path := fmt.Sprintf("/exec/%s/json", id)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchExec{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var exec ExecInspect
+ if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+ return nil, err
+ }
+ return &exec, nil
+}
+
+// NoSuchExec is the error returned when a given exec instance does not exist.
+type NoSuchExec struct {
+ ID string
+}
+
+func (err *NoSuchExec) Error() string {
+ return "No such exec instance: " + err.ID
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..a387154
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,26 @@
+# (Unreleased)
+
+logrus/core: improve performance of text formatter by 40%
+logrus/core: expose `LevelHooks` type
+
+# 0.8.2
+
+logrus: fix more Fatal family functions
+
+# 0.8.1
+
+logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+logrus: defaults to stderr instead of stdout
+hooks/sentry: add special field for `*http.Request`
+formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+formatter/text: Add configuration option for time format (#158)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..4be3784
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,355 @@
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = &log.TextFormatter{}` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is with the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different from that of the
+ // package-level exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. You can, however, still combine
+fields with the `printf`-family functions, as in the sketch below.
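+
+A minimal sketch (the variable names here are illustrative, not part of the
+API):
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+}).Warnf("delivery attempt %d failed", attempt)
+```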
+
+#### Hooks
+
+You can add hooks for logging levels. For example, you can send errors to an
+exception-tracking service on `Error`, `Fatal` and `Panic`, send info to
+StatsD, or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook (a
+minimal custom hook is sketched after the table below), in `init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
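+
+A minimal custom hook only needs to implement the `Hook` interface (`Levels`
+and `Fire`). The type below is an illustrative sketch, not a built-in hook,
+and assumes `import "github.com/Sirupsen/logrus"`:
+
+```go
+type ErrorCounterHook struct{ count int }
+
+func (h *ErrorCounterHook) Levels() []logrus.Level {
+  return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
+}
+
+func (h *ErrorCounterHook) Fire(entry *logrus.Entry) error {
+  h.count++
+  return nil
+}
+```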
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `AddFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
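+
+For instance, a single entry rendered by the default `TextFormatter` carries
+all three (sample output, values illustrative):
+
+```text
+time="2015-03-26T01:27:38-04:00" level=info msg="Failed to send event."
+```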
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force non-colored output even when there is a TTY, set
+ the `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
+
+ ```go
+ logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
+ ```
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..699ea03
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,254 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..a67e1b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,188 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the package-level standard logger, analogous to the one in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..104d689
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,48 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code,
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook is fired when logging on the levels returned from `Levels()` by your
+// implementation of the interface. Note that hooks are not fired in a
+// goroutine or through a worker channel; if a hook may block and you don't
+// want logging calls at those levels to block with it, handle that
+// concurrency yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..2ad6dc5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,41 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(timestampFormat)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..e4974bf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,206 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stderr`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in development and verbose environments.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry; note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..43ee12e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,94 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
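+
+// For example (illustrative):
+//
+//  lvl, err := logrus.ParseLevel("warn") // lvl == WarnLevel, err == nil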
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var _ StdLogger = &log.Logger{}
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..71f8d67
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,9 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go
new file mode 100644
index 0000000..0428ee5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go
@@ -0,0 +1,20 @@
+/*
+ Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
+*/
+package logrus
+
+import (
+ "syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..a2c0b40
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..b8bebc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go
new file mode 100644
index 0000000..af609a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go
@@ -0,0 +1,7 @@
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..2e09f6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..2e6fe1b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,158 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ if f.TimestampFormat == "" {
+ f.TimestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ }
+}
+
+// needsQuoting reports whether text contains any character outside the safe
+// set [a-zA-Z0-9.-] and therefore has to be quoted in the logfmt output.
+// (The original helper returned the inverse of what its name suggested; the
+// logic and call sites below are swapped so the name matches the behavior,
+// with identical output.)
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ b.WriteString(key)
+ b.WriteByte('=')
+
+ switch value := value.(type) {
+ case string:
+ if needsQuoting(value) {
+ fmt.Fprintf(b, "%q", value)
+ } else {
+ b.WriteString(value)
+ }
+ case error:
+ errmsg := value.Error()
+ if needsQuoting(errmsg) {
+ fmt.Fprintf(b, "%q", errmsg)
+ } else {
+ b.WriteString(errmsg)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+
+ b.WriteByte(' ')
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..1e30b1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,31 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ go logger.writerScanner(reader)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ logger.Print(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
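A common use of Writer() is routing another component's line-oriented output through logrus; a minimal sketch, again assuming the upstream import path:

    package main

    import (
        "log"

        "github.com/Sirupsen/logrus" // assumed upstream path
    )

    func main() {
        logger := logrus.New()

        w := logger.Writer() // every line scanned from the pipe becomes a logger.Print entry
        defer w.Close()      // closing ends the writerScanner goroutine

        log.SetOutput(w) // the stdlib logger now feeds into logrus
        log.Println("hello from the standard library logger")
    }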
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go
new file mode 100644
index 0000000..b854227
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go
@@ -0,0 +1,62 @@
+package opts
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+)
+
+var (
+ // EnvironmentVariableRegexp is a regexp to validate environment variable names.
+ // Environment variables set by the user must have a name consisting solely of
+ // alphabetics, numerics, and underscores, the first of which must not be numeric.
+ EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
+)
+
+// ParseEnvFile reads in a line-delimited file with environment variables enumerated.
+func ParseEnvFile(filename string) ([]string, error) {
+ fh, err := os.Open(filename)
+ if err != nil {
+ return []string{}, err
+ }
+ defer fh.Close()
+
+ lines := []string{}
+ scanner := bufio.NewScanner(fh)
+ for scanner.Scan() {
+ line := scanner.Text()
+ // the line is not empty and does not start with '#'
+ if len(line) > 0 && !strings.HasPrefix(line, "#") {
+ data := strings.SplitN(line, "=", 2)
+
+ // trim the front of a variable, but nothing else
+ variable := strings.TrimLeft(data[0], whiteSpaces)
+
+ if !EnvironmentVariableRegexp.MatchString(variable) {
+ return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)}
+ }
+ if len(data) > 1 {
+ // pass the value through, no trimming
+ lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
+ } else {
+ // if only a pass-through variable is given, clean it up.
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
+ }
+ }
+ }
+ return lines, scanner.Err()
+}
+
+var whiteSpaces = " \t"
+
+// ErrBadEnvVariable is a typed error for a bad environment variable.
+type ErrBadEnvVariable struct {
+ msg string
+}
+
+func (e ErrBadEnvVariable) Error() string {
+ return fmt.Sprintf("poorly formatted environment: %s", e.msg)
+}
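A short usage sketch for ParseEnvFile. The file name and its contents are hypothetical; per the code above, NAME=value lines pass through untouched and bare names are resolved via os.Getenv:

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
    )

    func main() {
        // ".env" is a hypothetical file, e.g.:
        //   FOO=bar
        //   # comments and empty lines are skipped
        //   HOME
        vars, err := opts.ParseEnvFile(".env")
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        for _, v := range vars {
            fmt.Println(v) // FOO=bar, HOME=<value of $HOME>
        }
    }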
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 0000000..a29335e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 0000000..55eac2a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package opts
+
+import "fmt"
+
+var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go
new file mode 100644
index 0000000..b1f9587
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,35 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+)
+
+// IpOpt is a type that holds an IP.
+type IpOpt struct {
+ *net.IP
+}
+
+func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt {
+ o := &IpOpt{
+ IP: ref,
+ }
+ o.Set(defaultVal)
+ return o
+}
+
+func (o *IpOpt) Set(val string) error {
+ ip := net.ParseIP(val)
+ if ip == nil {
+ return fmt.Errorf("%s is not an ip address", val)
+ }
+ *o.IP = ip
+ return nil
+}
+
+func (o *IpOpt) String() string {
+ if *o.IP == nil {
+ return ""
+ }
+ return o.IP.String()
+}
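Because *IpOpt implements both Set and String, it satisfies the standard library's flag.Value interface; a minimal sketch of binding it to a flag (the flag name and default below are illustrative, not from this repo):

    package main

    import (
        "flag"
        "fmt"
        "net"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
    )

    func main() {
        var ip net.IP
        // NewIpOpt seeds ip with the default via Set; flag.Var accepts *IpOpt as a flag.Value.
        flag.Var(opts.NewIpOpt(&ip, "127.0.0.1"), "bind", "IP address to bind to")
        flag.Parse()
        fmt.Println("binding to", ip)
    }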
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go
new file mode 100644
index 0000000..aa409b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,323 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"
+)
+
+var (
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+ // DefaultHTTPHost is the default HTTP host used if only the port is provided to the -H flag, e.g. docker -d -H tcp://:8080
+ DefaultHTTPHost = "127.0.0.1"
+ // DefaultHTTPPort is the default HTTP port used if only the protocol is provided to the -H flag, e.g. docker -d -H tcp://
+ // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
+ // is not supplied. A better longer term solution would be to use a named
+ // pipe as the default on the Windows daemon.
+ DefaultHTTPPort = 2375 // Default HTTP Port
+ // DefaultUnixSocket is the path for the unix socket.
+ // The Docker daemon by default always listens on the default unix socket.
+ DefaultUnixSocket = "/var/run/docker.sock"
+)
+
+// ListOpts is a type that holds a list of values and a validation function.
+type ListOpts struct {
+ values *[]string
+ validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+ var values []string
+ return *NewListOptsRef(&values, validator)
+}
+
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+ return &ListOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+func (opts *ListOpts) String() string {
+ return fmt.Sprintf("%v", []string((*opts.values)))
+}
+
+// Set validates the input value if a validator is set, then appends it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ (*opts.values) = append((*opts.values), value)
+ return nil
+}
+
+// Delete removes the given element from the slice.
+func (opts *ListOpts) Delete(key string) {
+ for i, k := range *opts.values {
+ if k == key {
+ (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+ return
+ }
+ }
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+// FIXME: can we remove this?
+func (opts *ListOpts) GetMap() map[string]struct{} {
+ ret := make(map[string]struct{})
+ for _, k := range *opts.values {
+ ret[k] = struct{}{}
+ }
+ return ret
+}
+
+// GetAll returns the values' slice.
+// FIXME: Can we remove this?
+func (opts *ListOpts) GetAll() []string {
+ return (*opts.values)
+}
+
+// Get checks the existence of the given key.
+func (opts *ListOpts) Get(key string) bool {
+ for _, k := range *opts.values {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+ return len((*opts.values))
+}
+
+// MapOpts is a type that holds a map of values and a validation function.
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+// Set validates the input value if a validator is set, then adds it to the
+// internal map, splitting on '='.
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+ if values == nil {
+ values = make(map[string]string)
+ }
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+// ValidatorFctType defines a validator that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator that returns a validated list of strings and/or an error.
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateAttach validates that the specified string is a valid attach option.
+func ValidateAttach(val string) (string, error) {
+ s := strings.ToLower(val)
+ for _, str := range []string{"stdin", "stdout", "stderr"} {
+ if s == str {
+ return s, nil
+ }
+ }
+ return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
+}
+
+// ValidateLink validates that the specified string has a valid link format (containerName:alias).
+func ValidateLink(val string) (string, error) {
+ if _, _, err := parsers.ParseLink(val); err != nil {
+ return val, err
+ }
+ return val, nil
+}
+
+// ValidateDevice validates a path for devices.
+// It will make sure 'val' is in the form:
+// [host-dir:]container-path[:mode]
+func ValidateDevice(val string) (string, error) {
+ return validatePath(val, false)
+}
+
+// ValidatePath validates a path for volumes.
+// It will make sure 'val' is in the form:
+// [host-dir:]container-path[:rw|ro]
+// It will also validate the mount mode.
+func ValidatePath(val string) (string, error) {
+ return validatePath(val, true)
+}
+
+func validatePath(val string, validateMountMode bool) (string, error) {
+ var containerPath string
+ var mode string
+
+ if strings.Count(val, ":") > 2 {
+ return val, fmt.Errorf("bad format for volumes: %s", val)
+ }
+
+ parts := strings.SplitN(val, ":", 3)
+ if parts[0] == "" {
+ return val, fmt.Errorf("bad format for volumes: %s", val)
+ }
+ switch len(parts) {
+ case 1:
+ containerPath = parts[0]
+ val = path.Clean(containerPath)
+ case 2:
+ if isValid, _ := volume.ValidateMountMode(parts[1]); validateMountMode && isValid {
+ containerPath = parts[0]
+ mode = parts[1]
+ val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
+ } else {
+ containerPath = parts[1]
+ val = fmt.Sprintf("%s:%s", parts[0], path.Clean(containerPath))
+ }
+ case 3:
+ containerPath = parts[1]
+ mode = parts[2]
+ if isValid, _ := volume.ValidateMountMode(parts[2]); validateMountMode && !isValid {
+ return val, fmt.Errorf("bad mount mode specified: %s", mode)
+ }
+ val = fmt.Sprintf("%s:%s:%s", parts[0], containerPath, mode)
+ }
+
+ if !path.IsAbs(containerPath) {
+ return val, fmt.Errorf("%s is not an absolute path", containerPath)
+ }
+ return val, nil
+}
+
+// ValidateEnv validates an environment variable and returns it.
+// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid.
+// If no value is specified, it returns the current value using os.Getenv.
+func ValidateEnv(val string) (string, error) {
+ arr := strings.Split(val, "=")
+ if len(arr) > 1 {
+ return val, nil
+ }
+ if !EnvironmentVariableRegexp.MatchString(arr[0]) {
+ return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)}
+ }
+ if !doesEnvExist(val) {
+ return val, nil
+ }
+ return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
+}
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateMACAddress validates a MAC address.
+func ValidateMACAddress(val string) (string, error) {
+ _, err := net.ParseMAC(strings.TrimSpace(val))
+ if err != nil {
+ return "", err
+ }
+ return val, nil
+}
+
+// ValidateDNSSearch validates a domain for the resolvconf search configuration.
+// A zero-length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+ if val = strings.Trim(val, " "); val == "." {
+ return val, nil
+ }
+ return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+ if alphaRegexp.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ ns := domainRegexp.FindSubmatch([]byte(val))
+ if len(ns) > 0 && len(ns[1]) < 255 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateExtraHost validates that the given string is a valid extra host and returns it.
+// Extra hosts are in the form name:ip, where the ip must be a valid IP address (IPv4 or IPv6).
+func ValidateExtraHost(val string) (string, error) {
+ // allow for IPv6 addresses in extra hosts by only splitting on first ":"
+ arr := strings.SplitN(val, ":", 2)
+ if len(arr) != 2 || len(arr[0]) == 0 {
+ return "", fmt.Errorf("bad format for add-host: %q", val)
+ }
+ if _, err := ValidateIPAddress(arr[1]); err != nil {
+ return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+ }
+ return val, nil
+}
+
+// ValidateLabel validates that the given string is a valid label and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("bad attribute format: %s", val)
+ }
+ return val, nil
+}
+
+// ValidateHost validates that the given string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+ host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
+ if err != nil {
+ return val, err
+ }
+ return host, nil
+}
+
+func doesEnvExist(name string) bool {
+ for _, entry := range os.Environ() {
+ parts := strings.SplitN(entry, "=", 2)
+ if parts[0] == name {
+ return true
+ }
+ }
+ return false
+}
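ListOpts follows the same flag.Value pattern, with the validator run on every Set; a sketch combining it with ValidateEnv (the flag name is illustrative):

    package main

    import (
        "flag"
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
    )

    func main() {
        envs := opts.NewListOpts(opts.ValidateEnv) // each -e value is validated before being appended
        flag.Var(&envs, "e", "set environment variables")
        flag.Parse()
        fmt.Println("collected:", envs.GetAll())
    }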
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go
new file mode 100644
index 0000000..54f6c4e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go
@@ -0,0 +1,47 @@
+package opts
+
+import (
+ "fmt"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
+)
+
+type UlimitOpt struct {
+ values *map[string]*ulimit.Ulimit
+}
+
+func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
+ if ref == nil {
+ ref = &map[string]*ulimit.Ulimit{}
+ }
+ return &UlimitOpt{ref}
+}
+
+func (o *UlimitOpt) Set(val string) error {
+ l, err := ulimit.Parse(val)
+ if err != nil {
+ return err
+ }
+
+ (*o.values)[l.Name] = l
+
+ return nil
+}
+
+func (o *UlimitOpt) String() string {
+ var out []string
+ for _, v := range *o.values {
+ out = append(out, v.String())
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+func (o *UlimitOpt) GetList() []*ulimit.Ulimit {
+ var ulimits []*ulimit.Ulimit
+ for _, v := range *o.values {
+ ulimits = append(ulimits, v)
+ }
+
+ return ulimits
+}
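UlimitOpt works the same way; a sketch, assuming ulimit.Parse accepts docker's name=soft[:hard] syntax:

    package main

    import (
        "flag"
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
    )

    func main() {
        ulimits := opts.NewUlimitOpt(nil) // nil ref: the option allocates its own map
        flag.Var(ulimits, "ulimit", "ulimit settings, e.g. nofile=1024:2048")
        flag.Parse()
        for _, u := range ulimits.GetList() {
            fmt.Println(u)
        }
    }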
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 0000000..7307d96
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 0000000..7306840
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,902 @@
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+type (
+ Archive io.ReadCloser
+ ArchiveReader io.Reader
+ Compression int
+ TarChownOptions struct {
+ UID, GID int
+ }
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ ChownOpts *TarChownOptions
+ Name string
+ IncludeSourceDir bool
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ }
+
+ // Archiver allows the reuse of most utility functions of this package
+ // with a pluggable Untar function.
+ Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ }
+
+ // breakoutError is used to differentiate errors related to breaking out of the extraction destination.
+ // When testing archive breakout in the unit tests, this error is expected
+ // in order for the test to pass.
+ breakoutError error
+)
+
+var (
+ ErrNotImplemented = errors.New("Function not implemented")
+ defaultArchiver = &Archiver{Untar}
+)
+
+const (
+ Uncompressed Compression = iota
+ Bzip2
+ Gzip
+ Xz
+)
+
+func IsArchive(header []byte) bool {
+ compression := DetectCompression(header)
+ if compression != Uncompressed {
+ return true
+ }
+ r := tar.NewReader(bytes.NewBuffer(header))
+ _, err := r.Next()
+ return err == nil
+}
+
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debugf("Len too short")
+ continue
+ }
+ if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return CmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil {
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return readBufWrapper, nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
+
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ link := ""
+ if fi.Mode()&os.ModeSymlink != 0 {
+ if link, err = os.Readlink(path); err != nil {
+ return err
+ }
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return err
+ }
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+
+ nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
+ if err != nil {
+ return err
+ }
+
+ // if it's a regular file and has more than 1 link,
+ // it's hardlinked, so set the type flag accordingly
+ if fi.Mode().IsRegular() && nlink > 1 {
+ // a link should have a name that it links to,
+ // and that linked name should come first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debugf("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ return err
+ }
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ // syscall.UtimesNano doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ } else {
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+ patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
+
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(compressWriter),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ }
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Debugf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Debugf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ var renamedRelFilePath string // For when TarOptions.Name is set
+ for _, include := range options.IncludeFiles {
+ // We can't use filepath.Join(srcPath, include) because this will
+ // clean away a trailing "." or "/" which may be important.
+ walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
+ if err != nil {
+ logrus.Debugf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ if !exceptions && f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // TODO Windows: Verify if this needs to be os.PathSeparator
+ // Rename the base resource
+ if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
+ renamedRelFilePath = relFilePath
+ }
+ // Set this to make sure the items underneath also get renamed
+ if options.Name != "" {
+ relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". On Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0777)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to prevent further
+ // file creation inside them from modifying the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ if err := syscall.UtimesNano(path, ts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ var r io.Reader = tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ return archiver.Untar(archive, dst, nil)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+ return defaultArchiver.TarUntar(src, dst)
+}
+
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ if err := archiver.Untar(archive, dst, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+ return defaultArchiver.UntarPath(src, dst)
+}
+
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+ return defaultArchiver.CopyWithTar(src, dst)
+}
+
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an
+ // operating-system-specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := promise.Go(func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ })
+ defer func() {
+ if er := <-errC; err != nil {
+ err = er
+ }
+ }()
+ return archiver.Untar(r, filepath.Dir(dst), nil)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is done in an operating-system-specific manner depending on
+// where the daemon is running. If `dst` ends with a trailing slash
+// the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+ return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// CmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
+ if input != nil {
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, err
+ }
+ // Write stdin if any
+ go func() {
+ io.Copy(stdin, input)
+ stdin.Close()
+ }()
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, err
+ }
+ stderr, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, err
+ }
+ pipeR, pipeW := io.Pipe()
+ errChan := make(chan []byte)
+ // Collect stderr, we will use it in case of an error
+ go func() {
+ errText, e := ioutil.ReadAll(stderr)
+ if e != nil {
+ errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
+ }
+ errChan <- errText
+ }()
+ // Copy stdout to the returned pipe
+ go func() {
+ _, err := io.Copy(pipeW, stdout)
+ if err != nil {
+ pipeW.CloseWithError(err)
+ }
+ errText := <-errChan
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
+ } else {
+ pipeW.Close()
+ }
+ }()
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
+ return pipeR, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
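End to end, the package is usually driven through TarWithOptions and Untar; a minimal sketch with hypothetical paths, relying on Untar's automatic compression detection via DecompressStream:

    package main

    import (
        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Pack /tmp/src (hypothetical) as a gzipped tar, skipping *.tmp files.
        rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
            Compression:     archive.Gzip,
            ExcludePatterns: []string{"*.tmp"},
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close()

        // Untar sniffs the gzip magic bytes itself, so no explicit decompression is needed.
        if err := archive.Untar(rc, "/tmp/dst", nil); err != nil {
            panic(err)
        }
    }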
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 0000000..5c75437
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,89 @@
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "syscall"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+// CanonicalTarNameForPath converts a platform-specific file path
+// to a canonical posix-style path for tar archival. p is a relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header based
+// on the platform on which the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ err = errors.New("cannot convert stat value to syscall.Stat_t")
+ return
+ }
+
+ nlink = uint32(s.Nlink)
+ inode = uint64(s.Ino)
+
+ // Currently Go does not fill in the major/minor device numbers
+ if s.Mode&syscall.S_IFBLK != 0 ||
+ s.Mode&syscall.S_IFCHR != 0 {
+ hdr.Devmajor = int64(major(uint64(s.Rdev)))
+ hdr.Devminor = int64(minor(uint64(s.Rdev)))
+ }
+
+ return
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= syscall.S_IFBLK
+ case tar.TypeChar:
+ mode |= syscall.S_IFCHR
+ case tar.TypeFifo:
+ mode |= syscall.S_IFIFO
+ }
+
+ if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+ return err
+ }
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 0000000..10db4bd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,50 @@
+// +build windows
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// CanonicalTarNameForPath converts a platform-specific file path
+// to a canonical posix-style path for tar archival. p is a relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert a windows-style relative path with backslashes
+ // into forward slashes. Since windows does not allow '/' or '\'
+ // in file names, it is mostly safe to replace; however, we must
+ // check just in case
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header based
+// on the platform on which the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ perm &= 0755
+ // Add the x bit: make everything +x from windows
+ perm |= 0111
+
+ return perm
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+ // do nothing; there is no notion of Rdev, Inode, or Nlink in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 0000000..c7838e8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,383 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+type ChangeType int
+
+const (
+ ChangeModify = iota
+ ChangeAdd
+ ChangeDelete
+)
+
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// Gnu tar and the go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count and either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ // Skip AUFS metadata
+ if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
+ return err
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ // Find out what kind of modification happened
+ file := filepath.Base(path)
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(file, ".wh.") {
+ originalFile := file[len(".wh."):]
+ change.Path = filepath.Join(filepath.Dir(path), originalFile)
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.Stat_t
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+func (root *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := root
+ if path == string(os.PathSeparator) {
+ return root
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions.
+ // Also, we only recurse on the old dir if the new info is a directory;
+ // otherwise any previous delete/change is considered recursive.
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+}
+
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var size int64
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, _ := os.Lstat(file)
+ if fileInfo != nil && !fileInfo.IsDir() {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change) (Archive, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ }
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
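Taken together, ChangesDirs, ChangesSize, and ExportChanges form this package's diff pipeline. A minimal usage sketch (not part of the vendored source; the directory paths are hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// Compute the changes that the new layer directory introduces
	// relative to the old one.
	changes, err := archive.ChangesDirs("/tmp/layer-new", "/tmp/layer-old")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "%d changes, %d bytes\n",
		len(changes), archive.ChangesSize("/tmp/layer-new", changes))

	// Stream the changes as a tar layer; deletions become .wh. whiteout entries.
	layer, err := archive.ExportChanges("/tmp/layer-new", changes)
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()
	if _, err := io.Copy(os.Stdout, layer); err != nil {
		log.Fatal(err)
	}
}
```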
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 0000000..378cc09
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,285 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save on the order of
+// seconds on large images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of syscall.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
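The pruning walk above hinges on a two-pointer merge of the sorted name lists returned by readdirnames. A stand-alone sketch of the same merge over plain strings (the real loop additionally drops entries whose inode and device match on both sides):

```go
package main

import "fmt"

// mergeNames returns the sorted union of two sorted name lists,
// mirroring the parallel walk in (*walker).walk.
func mergeNames(a, b []string) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]: // present only in a
			out = append(out, a[i])
			i++
		case a[i] == b[j]: // present in both
			out = append(out, a[i])
			i++
			j++
		default: // present only in b
			out = append(out, b[j])
			j++
		}
	}
	out = append(out, a[i:]...) // drain whichever list remains
	out = append(out, b[j:]...)
	return out
}

func main() {
	fmt.Println(mergeNames([]string{"a", "c", "d"}, []string{"b", "c", "e"}))
	// Output: [a b c d e]
}
```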
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 0000000..35832f0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 0000000..dc1ea60
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package archive
+
+import (
+ "syscall"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.Uid() != newStat.Uid() ||
+ oldStat.Gid() != newStat.Gid() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs; it's not a good measure of change.
+ (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 0000000..6026575
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,20 @@
+package archive
+
+import (
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+
+ // Don't look at size for dirs; it's not a good measure of change.
+ if oldStat.ModTime() != newStat.ModTime() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.IsDir()
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 0000000..576f336
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,308 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+ if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) {
+ if !HasTrailingPathSeparator(cleanedPath) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(filepath.Separator)
+ }
+ cleanedPath += "."
+ }
+
+ if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) {
+ cleanedPath += string(filepath.Separator)
+ }
+
+ return cleanedPath
+}
+
+// AssertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func AssertsDirectory(path string) bool {
+ return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path)
+}
+
+// HasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func HasTrailingPathSeparator(path string) bool {
+ return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// SpecifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func SpecifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its
+// parent directory and its basename in that directory.
+func SplitPathDirEntry(localizedPath string) (dir, base string) {
+ normalizedPath := filepath.ToSlash(localizedPath)
+ vol := filepath.VolumeName(normalizedPath)
+ normalizedPath = normalizedPath[len(vol):]
+
+ if normalizedPath == "/" {
+ // Specifies the root path.
+ return filepath.FromSlash(vol + normalizedPath), "."
+ }
+
+ trimmedPath := vol + strings.TrimRight(normalizedPath, "/")
+
+ dir = filepath.FromSlash(path.Dir(trimmedPath))
+ base = filepath.FromSlash(path.Base(trimmedPath))
+
+ return dir, base
+}
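A short sketch of how SplitPathDirEntry behaves (outputs shown for a Unix-style separator):

```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	dir, base := archive.SplitPathDirEntry("/a/b/c/")
	fmt.Println(dir, base) // /a/b c

	dir, base = archive.SplitPathDirEntry("/")
	fmt.Println(dir, base) // / .
}
```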
+
+// TarResource archives the resource at the given sourcePath into a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourcePath string) (content Archive, err error) {
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) {
+ // In the case where the source path is a symbolic link AND it ends
+ // with a path separator, we will want to evaluate the symbolic link.
+ trimmedPath := sourcePath[:len(sourcePath)-1]
+ stat, err := os.Lstat(trimmedPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if stat.Mode()&os.ModeSymlink != 0 {
+ if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+ filter := []string{sourceBase}
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+ return TarWithOptions(sourceDir, &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ })
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+}
+
+// CopyInfoStatPath stats the given path to create a CopyInfo
+// struct representing that resource. If mustExist is true, then
+// it is an error if there is no file or directory at the given path.
+func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) {
+ pathInfo := CopyInfo{Path: path}
+
+ fileInfo, err := os.Lstat(path)
+
+ if err == nil {
+ pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir()
+ } else if os.IsNotExist(err) && !mustExist {
+ err = nil
+ }
+
+ return pathInfo, err
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case AssertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// rebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcPath)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
+ dstInfo, err := CopyInfoStatPath(dstPath, false)
+ if err != nil {
+ return err
+ }
+
+ if !dstInfo.Exists {
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(dstPath)
+
+ dstStat, err := os.Lstat(dstParent)
+ if err != nil {
+ return err
+ }
+ if !dstStat.IsDir() {
+ return ErrNotDirectory
+ }
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
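A minimal sketch of the copy entry point above; the paths are hypothetical, and per the documented contract the source must exist and the destination's parent directory must exist:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy a file into an existing directory. The trailing separator on
	// the destination asserts that it is a directory.
	if err := archive.CopyResource("/tmp/src/config.json", "/tmp/dst/"); err != nil {
		log.Fatal(err)
	}
}
```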
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 0000000..e305b5e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 0000000..2b775b4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 0000000..10a63a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,210 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+)
+
+// UnpackLayer unpacks the contents of the (uncompressed) layer archive into
+// dest, returning the size in bytes of the unpacked contents.
+func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Note: these operations are platform specific, so the slash must be as well.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, ".wh..wh.") {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
+ return 0, err
+ }
+ }
+ continue
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: these operations are platform specific, so the slash must be as well.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, ".wh.") {
+ originalBase := base[len(".wh."):]
+ originalPath := filepath.Join(filepath.Dir(path), originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ } else {
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
+ if err := syscall.UtimesNano(path, ts); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
+ return applyLayerHandler(dest, layer, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
+ return applyLayerHandler(dest, layer, false)
+}
+
+// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
+func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ if decompress {
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return UnpackLayer(dest, layer)
+}
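A usage sketch for ApplyLayer (ArchiveReader is the package's io.Reader alias, so any open file works; the paths here are hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// The stream may be compressed; ApplyLayer decompresses it itself.
	layer, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("unpacked %d bytes\n", size)
}
```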
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 0000000..a5e08e4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ var err error
+ oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes\n", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 0000000..3448569
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 0000000..e85aac0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 0000000..dfb335c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io/ioutil"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (Archive, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return ioutil.NopCloser(buf), nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
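A usage sketch for Generate, matching its doc comment:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// One complete pair plus a trailing name, which becomes an empty file.
	tarball, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(os.Stdout, tarball); err != nil {
		log.Fatal(err)
	}
}
```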
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 0000000..1b8cadc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,196 @@
+package fileutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+)
+
+// exclusion returns true if the specified pattern is an exclusion
+func exclusion(pattern string) bool {
+ return pattern[0] == '!'
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+ return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns and returns a new
+// slice of patterns cleaned with filepath.Clean and stripped
+// of any empty patterns. It also lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+ // Loop over exclusion patterns and:
+ // 1. Clean them up.
+ // 2. Indicate whether we are dealing with any exception rules.
+ // 3. Error if we see a single exclusion marker on its own (!).
+ cleanedPatterns := []string{}
+ patternDirs := [][]string{}
+ exceptions := false
+ for _, pattern := range patterns {
+ // Eliminate leading and trailing whitespace.
+ pattern = strings.TrimSpace(pattern)
+ if empty(pattern) {
+ continue
+ }
+ if exclusion(pattern) {
+ if len(pattern) == 1 {
+ return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+ }
+ exceptions = true
+ }
+ pattern = filepath.Clean(pattern)
+ cleanedPatterns = append(cleanedPatterns, pattern)
+ if exclusion(pattern) {
+ pattern = pattern[1:]
+ }
+ patternDirs = append(patternDirs, strings.Split(pattern, "/"))
+ }
+
+ return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+ file = filepath.Clean(file)
+
+ if file == "." {
+ // Don't let them exclude everything, kind of silly.
+ return false, nil
+ }
+
+ patterns, patDirs, _, err := CleanPatterns(patterns)
+ if err != nil {
+ return false, err
+ }
+
+ return OptimizedMatches(file, patterns, patDirs)
+}
+
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
+// It will assume that the inputs have been preprocessed and therefore the function
+// doesn't need to do as much error checking and clean-up. This was done to avoid
+// repeating these steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+ matched := false
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, "/")
+
+ for i, pattern := range patterns {
+ negative := false
+
+ if exclusion(pattern) {
+ negative = true
+ pattern = pattern[1:]
+ }
+
+ match, err := filepath.Match(pattern, file)
+ if err != nil {
+ return false, err
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(patDirs[i]) <= len(parentPathDirs) {
+ match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
+ strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
+ }
+ }
+
+ if match {
+ matched = !negative
+ }
+ }
+
+ if matched {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return matched, nil
+}
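A usage sketch for Matches, showing how a later exception pattern re-includes a file:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Exclude all markdown files except README.md.
	patterns := []string{"*.md", "!README.md"}

	for _, f := range []string{"CHANGELOG.md", "README.md", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s excluded=%v\n", f, excluded)
	}
	// CHANGELOG.md excluded=true, README.md excluded=false, main.go excluded=false
}
```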
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies that src exists and removes
+// dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+ cleanSrc := filepath.Clean(src)
+ cleanDst := filepath.Clean(dst)
+ if cleanSrc == cleanDst {
+ return 0, nil
+ }
+ sf, err := os.Open(cleanSrc)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(cleanDst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// GetTotalUsedFds returns the number of used file descriptors by
+// reading them via the /proc filesystem.
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// It returns an error if the target of the symbolic link is not a directory.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go
new file mode 100644
index 0000000..dcae178
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go
@@ -0,0 +1,39 @@
+package homedir
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+ if runtime.GOOS == "windows" {
+ return "USERPROFILE"
+ }
+ return "HOME"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ home := os.Getenv(Key())
+ if home == "" && runtime.GOOS != "windows" {
+ if u, err := user.CurrentUser(); err == nil {
+ return u.Home
+ }
+ }
+ return home
+}
+
+// GetShortcutString returns the string that is the shortcut to the user's
+// home directory in the native shell of the platform being run on.
+func GetShortcutString() string {
+ if runtime.GOOS == "windows" {
+ return "%USERPROFILE%" // be careful while using in format functions
+ }
+ return "~"
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go
new file mode 100644
index 0000000..801132f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go
@@ -0,0 +1,14 @@
+package ioutils
+
+import (
+ "fmt"
+ "io"
+)
+
+// FprintfIfNotEmpty prints the string value if it's not empty
+func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
+ if value != "" {
+ return fmt.Fprintf(w, format, value)
+ }
+ return 0, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go
new file mode 100644
index 0000000..f231aa9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go
@@ -0,0 +1,226 @@
+package ioutils
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+type pos struct {
+ idx int
+ offset int64
+}
+
+type multiReadSeeker struct {
+ readers []io.ReadSeeker
+ pos *pos
+ posIdx map[io.ReadSeeker]int
+}
+
+func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ var tmpOffset int64
+ switch whence {
+ case os.SEEK_SET:
+ for i, rdr := range r.readers {
+ // get size of the current reader
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ if offset > tmpOffset+s {
+ if i == len(r.readers)-1 {
+ rdrOffset := s + (offset - tmpOffset)
+ if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ r.pos = &pos{i, rdrOffset}
+ return offset, nil
+ }
+
+ tmpOffset += s
+ continue
+ }
+
+ rdrOffset := offset - tmpOffset
+ idx := i
+
+ rdr.Seek(rdrOffset, os.SEEK_SET)
+ // make sure all following readers are at 0
+ for _, rdr := range r.readers[i+1:] {
+ rdr.Seek(0, os.SEEK_SET)
+ }
+
+ if rdrOffset == s && i != len(r.readers)-1 {
+ idx++
+ rdrOffset = 0
+ }
+ r.pos = &pos{idx, rdrOffset}
+ return offset, nil
+ }
+ case os.SEEK_END:
+ for _, rdr := range r.readers {
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+ tmpOffset += s
+ }
+ r.Seek(tmpOffset+offset, os.SEEK_SET)
+ return tmpOffset + offset, nil
+ case os.SEEK_CUR:
+ if r.pos == nil {
+ return r.Seek(offset, os.SEEK_SET)
+ }
+ // Just return the current offset
+ if offset == 0 {
+ return r.getCurOffset()
+ }
+
+ curOffset, err := r.getCurOffset()
+ if err != nil {
+ return -1, err
+ }
+ rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
+ if err != nil {
+ return -1, err
+ }
+
+ r.pos = &pos{r.posIdx[rdr], rdrOffset}
+ return curOffset + offset, nil
+ default:
+ return -1, fmt.Errorf("Invalid whence: %d", whence)
+ }
+
+ return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
+}
+
+func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
+ var offsetTo int64
+
+ for _, rdr := range r.readers {
+ size, err := getReadSeekerSize(rdr)
+ if err != nil {
+ return nil, -1, err
+ }
+ // The requested offset falls within this reader.
+ if offsetTo+size > offset {
+ return rdr, offset - offsetTo, nil
+ }
+ // Offset is past the end of all readers; position the last
+ // reader beyond its own end.
+ if rdr == r.readers[len(r.readers)-1] {
+ return rdr, offset - offsetTo, nil
+ }
+ offsetTo += size
+ }
+
+ return nil, 0, nil
+}
+
+func (r *multiReadSeeker) getCurOffset() (int64, error) {
+ var totalSize int64
+ for _, rdr := range r.readers[:r.pos.idx+1] {
+ if r.posIdx[rdr] == r.pos.idx {
+ totalSize += r.pos.offset
+ break
+ }
+
+ size, err := getReadSeekerSize(rdr)
+ if err != nil {
+ return -1, fmt.Errorf("error getting seeker size: %v", err)
+ }
+ totalSize += size
+ }
+ return totalSize, nil
+}
+
+func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
+ var offset int64
+ for _, rs := range r.readers {
+ if rs == rdr {
+ break
+ }
+
+ // Accumulate the sizes of all readers that precede rdr.
+ size, err := getReadSeekerSize(rs)
+ if err != nil {
+ return -1, err
+ }
+ offset += size
+ }
+ return offset, nil
+}
+
+func (r *multiReadSeeker) Read(b []byte) (int, error) {
+ if r.pos == nil {
+ r.pos = &pos{0, 0}
+ }
+
+ bCap := int64(cap(b))
+ buf := bytes.NewBuffer(nil)
+ var rdr io.ReadSeeker
+
+ for _, rdr = range r.readers[r.pos.idx:] {
+ readBytes, err := io.CopyN(buf, rdr, bCap)
+ if err != nil && err != io.EOF {
+ return -1, err
+ }
+ bCap -= readBytes
+
+ if bCap == 0 {
+ break
+ }
+ }
+
+ rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+ r.pos = &pos{r.posIdx[rdr], rdrPos}
+ return buf.Read(b)
+}
+
+func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
+ // save the current position
+ pos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+
+ // get the size
+ size, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ // reset the position
+ if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ return size, nil
+}
+
+// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
+// input readseekers. After calling this method the initial position is set to the
+// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
+// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
+// Seek can be used over the sum of lengths of all readseekers.
+//
+// When a MultiReadSeeker is used, no Read and Seek operations should be made on
+// its ReadSeeker components. Also, users should make no assumption on the state
+// of individual readseekers while the MultiReadSeeker is used.
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+ if len(readers) == 1 {
+ return readers[0]
+ }
+ idx := make(map[io.ReadSeeker]int)
+ for i, rdr := range readers {
+ idx[rdr] = i
+ }
+ return &multiReadSeeker{
+ readers: readers,
+ posIdx: idx,
+ }
+}
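A usage sketch for MultiReadSeeker (*strings.Reader satisfies io.ReadSeeker, so it makes a convenient input):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	rs := ioutils.MultiReadSeeker(
		strings.NewReader("hello "),
		strings.NewReader("world"),
	)

	// Seek into the logical concatenation of both readers, then drain.
	if _, err := rs.Seek(6, os.SEEK_SET); err != nil {
		panic(err)
	}
	rest, err := ioutil.ReadAll(rs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(rest)) // world
}
```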
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 0000000..ff09baa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,254 @@
+package ioutils
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+ "math/big"
+ "sync"
+ "time"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns an io.ReadCloser that reads from r and
+// calls closer when Close is invoked.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// bufReader allows the underlying reader to continue to produce
+// output by pre-emptively reading from the wrapped reader.
+// This is achieved by buffering this data in bufReader's
+// expanding buffer.
+type bufReader struct {
+ sync.Mutex
+ buf *bytes.Buffer
+ reader io.Reader
+ err error
+ wait sync.Cond
+ drainBuf []byte
+ reuseBuf []byte
+ maxReuse int64
+ resetTimeout time.Duration
+ bufLenResetThreshold int64
+ maxReadDataReset int64
+}
+
+func NewBufReader(r io.Reader) *bufReader {
+ var timeout int
+ if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
+ timeout = int(randVal.Int64()) + 180
+ } else {
+ timeout = 300
+ }
+ reader := &bufReader{
+ buf: &bytes.Buffer{},
+ drainBuf: make([]byte, 1024),
+ reuseBuf: make([]byte, 4096),
+ maxReuse: 1000,
+ resetTimeout: time.Second * time.Duration(timeout),
+ bufLenResetThreshold: 100 * 1024,
+ maxReadDataReset: 10 * 1024 * 1024,
+ reader: r,
+ }
+ reader.wait.L = &reader.Mutex
+ go reader.drain()
+ return reader
+}
+
+func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
+ reader := &bufReader{
+ buf: buffer,
+ drainBuf: drainBuffer,
+ reader: r,
+ }
+ reader.wait.L = &reader.Mutex
+ go reader.drain()
+ return reader
+}
+
+func (r *bufReader) drain() {
+ var (
+ duration time.Duration
+ lastReset time.Time
+ now time.Time
+ reset bool
+ bufLen int64
+ dataSinceReset int64
+ maxBufLen int64
+ reuseBufLen int64
+ reuseCount int64
+ )
+ reuseBufLen = int64(len(r.reuseBuf))
+ lastReset = time.Now()
+ for {
+ n, err := r.reader.Read(r.drainBuf)
+ dataSinceReset += int64(n)
+ r.Lock()
+ bufLen = int64(r.buf.Len())
+ if bufLen > maxBufLen {
+ maxBufLen = bufLen
+ }
+
+ // Avoid unbounded growth of the buffer over time.
+ // This has been discovered to be the only non-intrusive
+ // solution to the unbounded growth of the buffer.
+ // Alternative solutions such as compression, multiple
+ // buffers, channels and other similar pieces of code
+ // were reducing throughput, overall Docker performance
+ // or simply crashed Docker.
+ // This solution releases the buffer when specific
+ // conditions are met to avoid the continuous resizing
+ // of the buffer for long lived containers.
+ //
+ // Move data to the front of the buffer if it's
+ // smaller than what reuseBuf can store
+ if bufLen > 0 && reuseBufLen >= bufLen {
+ n, _ := r.buf.Read(r.reuseBuf)
+ r.buf.Write(r.reuseBuf[0:n])
+ // Take action if the buffer has been reused too many
+ // times and if there's data in the buffer.
+ // The timeout is also used as a means to avoid
+ // these operations more often or less often than
+ // required.
+ // The various conditions try to detect heavy activity
+ // in the buffer which might be indicators of heavy
+ // growth of the buffer.
+ } else if reuseCount >= r.maxReuse && bufLen > 0 {
+ now = time.Now()
+ duration = now.Sub(lastReset)
+ timeoutReached := duration >= r.resetTimeout
+
+ // The timeout has been reached and the
+ // buffered data couldn't be moved to the front
+ // of the buffer, so the buffer gets reset.
+ if timeoutReached && bufLen > reuseBufLen {
+ reset = true
+ }
+ // The amount of buffered data is too high now,
+ // reset the buffer.
+ if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
+ reset = true
+ }
+ // Reset the buffer if a certain amount of
+ // data has gone through the buffer since the
+ // last reset.
+ if timeoutReached && dataSinceReset >= r.maxReadDataReset {
+ reset = true
+ }
+ // The buffered data is moved to a fresh buffer,
+ // swap the old buffer with the new one and
+ // reset all counters.
+ if reset {
+ newbuf := &bytes.Buffer{}
+ newbuf.ReadFrom(r.buf)
+ r.buf = newbuf
+ lastReset = now
+ reset = false
+ dataSinceReset = 0
+ maxBufLen = 0
+ reuseCount = 0
+ }
+ }
+ if err != nil {
+ r.err = err
+ } else {
+ r.buf.Write(r.drainBuf[0:n])
+ }
+ reuseCount++
+ r.wait.Signal()
+ r.Unlock()
+ callSchedulerIfNecessary()
+ if err != nil {
+ break
+ }
+ }
+}
+
+func (r *bufReader) Read(p []byte) (n int, err error) {
+ r.Lock()
+ defer r.Unlock()
+ for {
+ n, err = r.buf.Read(p)
+ if n > 0 {
+ return n, err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ r.wait.Wait()
+ }
+}
+
+func (r *bufReader) Close() error {
+ closer, ok := r.reader.(io.ReadCloser)
+ if !ok {
+ return nil
+ }
+ return closer.Close()
+}
+
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
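For orientation, here is a minimal sketch (not part of the vendored file) showing how two of the helpers above compose: NewReadCloserWrapper attaches a custom close action to a plain reader, and HashData drains the stream into a "sha256:<hex>" digest. The input string is illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Attach a close action to a reader that has none of its own.
	rc := ioutils.NewReadCloserWrapper(strings.NewReader("hello"), func() error {
		fmt.Println("closed")
		return nil
	})
	defer rc.Close()

	// HashData reads the stream to EOF and returns its sha256 digest.
	digest, err := ioutils.HashData(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println(digest) // sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
}
```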
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go
new file mode 100644
index 0000000..3c88f29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go
@@ -0,0 +1,6 @@
+// +build !gccgo
+
+package ioutils
+
+func callSchedulerIfNecessary() {
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go
new file mode 100644
index 0000000..c11d02b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go
@@ -0,0 +1,13 @@
+// +build gccgo
+
+package ioutils
+
+import (
+ "runtime"
+)
+
+func callSchedulerIfNecessary() {
+ // Allow or force the Go scheduler to switch context; without explicitly
+ // forcing this, execution hangs under the gccgo implementation.
+ runtime.Gosched()
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000..2509547
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,47 @@
+package ioutils
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+type WriteFlusher struct {
+ sync.Mutex
+ w io.Writer
+ flusher http.Flusher
+ flushed bool
+}
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ wf.Lock()
+ defer wf.Unlock()
+ n, err = wf.w.Write(b)
+ wf.flushed = true
+ wf.flusher.Flush()
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ wf.Lock()
+ defer wf.Unlock()
+ wf.flushed = true
+ wf.flusher.Flush()
+}
+
+func (wf *WriteFlusher) Flushed() bool {
+ wf.Lock()
+ defer wf.Unlock()
+ return wf.flushed
+}
+
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var flusher http.Flusher
+ if f, ok := w.(http.Flusher); ok {
+ flusher = f
+ } else {
+ flusher = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: flusher}
+}
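A usage sketch, assuming a streaming HTTP handler (the route and payload here are made up): NewWriteFlusher wraps the ResponseWriter so each write reaches the client immediately, falling back to NopFlusher when the writer cannot flush.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		// Each Write below is flushed to the client as it happens.
		wf := ioutils.NewWriteFlusher(w)
		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "event %d\n", i)
			time.Sleep(time.Second)
		}
	})
	http.ListenAndServe(":8080", nil)
}
```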
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 0000000..43fdc44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,60 @@
+package ioutils
+
+import "io"
+
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+type NopFlusher struct{}
+
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// Wrap a concrete io.Writer and hold a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
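A small sketch of the WriteCounter use case named in the comment above: json.Encoder.Encode returns only an error, so the counter recovers the byte count. The payload is illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Count bytes produced by an encoder that masks Write's return value.
	wc := ioutils.NewWriteCounter(&ioutils.NopWriter{})
	if err := json.NewEncoder(wc).Encode(map[string]string{"status": "ok"}); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", wc.Count)
}
```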
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE
new file mode 100644
index 0000000..ac74d8f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md
new file mode 100644
index 0000000..da00efa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md
@@ -0,0 +1,40 @@
+Package mflag (aka multiple-flag) implements command-line flag parsing.
+It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/).
+
+It adds:
+
+* both short and long flag versions
+`./example -s red` `./example --string blue`
+
+* multiple names for the same option
+```
+$>./example -h
+Usage of example:
+ -s, --string="": a simple string
+```
+
+___
+It is very flexible on purpose, so you can do things like:
+```
+$>./example -h
+Usage of example:
+ -s, -string, --string="": a simple string
+```
+
+Or:
+```
+$>./example -h
+Usage of example:
+ -oldflag, --newflag="": a simple string
+```
+
+You can also hide some flags from the usage, so if we want only `--newflag`:
+```
+$>./example -h
+Usage of example:
+ --newflag="": a simple string
+$>./example -oldflag str
+str
+```
+
+See [example.go](example/example.go) for more details.
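For reference, a minimal program in the spirit of the example referenced above (a sketch, not the vendored example.go itself):

```go
package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	// One flag reachable as either -s or --string; a name starting with '#'
	// would be hidden from the usage output.
	str := flag.String([]string{"s", "-string"}, "", "a simple string")
	flag.Parse()
	fmt.Println(*str)
}
```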
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go
new file mode 100644
index 0000000..ebfa350
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go
@@ -0,0 +1,1201 @@
+// Copyright 2014-2015 The Docker & Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mflag implements command-line flag parsing.
+//
+// Usage:
+//
+// Define flags using flag.String(), Bool(), Int(), etc.
+//
+// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
+// import "flag /github.com/docker/docker/pkg/mflag"
+// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
+// If you like, you can bind the flag to a variable using the Var() functions.
+// var flagvar int
+// func init() {
+// // -flaghidden will work, but will be hidden from the usage
+// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname")
+// }
+// Or you can create custom flags that satisfy the Value interface (with
+// pointer receivers) and couple them to flag parsing by
+// flag.Var(&flagVal, []string{"name"}, "help message for flagname")
+// For such flags, the default value is just the initial value of the variable.
+//
+// You can also add "deprecated" flags: they are still usable, but are not shown
+// in the usage and display a warning when you try to use them. `#` before
+// an option means the option is deprecated; if there is a following option
+// without `#`, that is its replacement; if not, the option will just be removed:
+// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname")
+// using `-f` will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` and
+// using `-flagname` will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.`
+// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")
+// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
+// so you can only use `-f`.
+//
+// You can also group one-letter flags; if you declare
+// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
+// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")
+// you will be able to use -vs or -sv
+//
+// After all flags are defined, call
+// flag.Parse()
+// to parse the command line into the defined flags.
+//
+// Flags may then be used directly. If you're using the flags themselves,
+// they are all pointers; if you bind to variables, they're values.
+// fmt.Println("ip has value ", *ip)
+// fmt.Println("flagvar has value ", flagvar)
+//
+// After parsing, the arguments after the flag are available as the
+// slice flag.Args() or individually as flag.Arg(i).
+// The arguments are indexed from 0 through flag.NArg()-1.
+//
+// Command line flag syntax:
+// -flag
+// -flag=x
+// -flag="x"
+// -flag='x'
+// -flag x // non-boolean flags only
+// One or two minus signs may be used; they are equivalent.
+// The last form is not permitted for boolean flags because the
+// meaning of the command
+// cmd -x *
+// will change if there is a file called 0, false, etc. You must
+// use the -flag=false form to turn off a boolean flag.
+//
+// Flag parsing stops just before the first non-flag argument
+// ("-" is a non-flag argument) or after the terminator "--".
+//
+// Integer flags accept 1234, 0664, 0x1234 and may be negative.
+// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
+// Duration flags accept any input valid for time.ParseDuration.
+//
+// The default set of command-line flags is controlled by
+// top-level functions. The FlagSet type allows one to define
+// independent sets of flags, such as to implement subcommands
+// in a command-line interface. The methods of FlagSet are
+// analogous to the top-level functions for the command-line
+// flag set.
+
+package mflag
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("flag: help requested")
+
+// ErrRetry is the error returned if you need to try letter by letter
+var ErrRetry = errors.New("flag: retry")
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Get() interface{} { return bool(*b) }
+
+func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Get() interface{} { return int(*i) }
+
+func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Get() interface{} { return int64(*i) }
+
+func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Get() interface{} { return uint(*i) }
+
+func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Get() interface{} { return uint64(*i) }
+
+func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+
+func (s *stringValue) Get() interface{} { return string(*s) }
+
+func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Get() interface{} { return float64(*f) }
+
+func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Get() interface{} { return time.Duration(*d) }
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+//
+// If a Value has an IsBoolFlag() bool method returning true,
+// the command-line parser makes -name equivalent to -name=true
+// rather than using the next command-line argument.
+type Value interface {
+ String() string
+ Set(string) error
+}
+
+// Getter is an interface that allows the contents of a Value to be retrieved.
+// It wraps the Value interface, rather than being part of it, because it
+// appeared after Go 1 and its compatibility rules. All Value types provided
+// by this package satisfy the Getter interface.
+type Getter interface {
+ Value
+ Get() interface{}
+}
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+// ErrorHandling strategies available when a flag parsing error occurs
+const (
+ ContinueOnError ErrorHandling = iota
+ ExitOnError
+ PanicOnError
+)
+
+// A FlagSet represents a set of defined flags. The zero value of a FlagSet
+// has no name and has ContinueOnError error handling.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+ ShortUsage func()
+
+ name string
+ parsed bool
+ actual map[string]*Flag
+ formal map[string]*Flag
+ args []string // arguments after flags
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use Out() accessor
+ nArgRequirements []nArgRequirement
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Names []string // name as it appears on command line
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+}
+
+type flagSlice []string
+
+func (p flagSlice) Len() int { return len(p) }
+func (p flagSlice) Less(i, j int) bool {
+ pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-")
+ lpi, lpj := strings.ToLower(pi), strings.ToLower(pj)
+ if lpi != lpj {
+ return lpi < lpj
+ }
+ return pi < pj
+}
+func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[string]*Flag) []*Flag {
+ var list flagSlice
+
+ // The sorted list is based on the first name, though the flag map might be keyed by any of the names.
+ nameMap := make(map[string]string)
+
+ for n, f := range flags {
+ fName := strings.TrimPrefix(f.Names[0], "#")
+ nameMap[fName] = n
+ if len(f.Names) == 1 {
+ list = append(list, fName)
+ continue
+ }
+
+ found := false
+ for _, name := range list {
+ if name == fName {
+ found = true
+ break
+ }
+ }
+ if !found {
+ list = append(list, fName)
+ }
+ }
+ sort.Sort(list)
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[nameMap[name]]
+ }
+ return result
+}
+
+// Name returns the name of the FlagSet.
+func (fs *FlagSet) Name() string {
+ return fs.name
+}
+
+// Out returns the destination for usage and error messages.
+func (fs *FlagSet) Out() io.Writer {
+ if fs.output == nil {
+ return os.Stderr
+ }
+ return fs.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (fs *FlagSet) SetOutput(output io.Writer) {
+ fs.output = output
+}
+
+// VisitAll visits the flags in lexicographical order, calling fn for each.
+// It visits all flags, even those not set.
+func (fs *FlagSet) VisitAll(fn func(*Flag)) {
+ for _, flag := range sortFlags(fs.formal) {
+ fn(flag)
+ }
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (fs *FlagSet) Visit(fn func(*Flag)) {
+ for _, flag := range sortFlags(fs.actual) {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (fs *FlagSet) Lookup(name string) *Flag {
+ return fs.formal[name]
+}
+
+// IsSet indicates whether the specified flag is set in the given FlagSet
+func (fs *FlagSet) IsSet(name string) bool {
+ return fs.actual[name] != nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.formal[name]
+}
+
+// IsSet indicates whether the specified flag was specified at all on the cmd line.
+func IsSet(name string) bool {
+ return CommandLine.IsSet(name)
+}
+
+type nArgRequirementType int
+
+// Indicator used to pass to BadArgs function
+const (
+ Exact nArgRequirementType = iota
+ Max
+ Min
+)
+
+type nArgRequirement struct {
+ Type nArgRequirementType
+ N int
+}
+
+// Require adds a requirement about the number of arguments for the FlagSet.
+// The first parameter can be Exact, Max, or Min to respectively specify the exact,
+// the maximum, or the minimal number of arguments required.
+// The actual check is done in FlagSet.CheckArgs().
+func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) {
+ fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg})
+}
+
+// CheckArgs uses the requirements set by FlagSet.Require() to validate
+// the number of arguments. If the requirements are not met,
+// an error message string is returned.
+func (fs *FlagSet) CheckArgs() (message string) {
+ for _, req := range fs.nArgRequirements {
+ var arguments string
+ if req.N == 1 {
+ arguments = "1 argument"
+ } else {
+ arguments = fmt.Sprintf("%d arguments", req.N)
+ }
+
+ str := func(kind string) string {
+ return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments)
+ }
+
+ switch req.Type {
+ case Exact:
+ if fs.NArg() != req.N {
+ return str("")
+ }
+ case Max:
+ if fs.NArg() > req.N {
+ return str("a maximum of ")
+ }
+ case Min:
+ if fs.NArg() < req.N {
+ return str("a minimum of ")
+ }
+ }
+ }
+ return ""
+}
+
+// Set sets the value of the named flag.
+func (fs *FlagSet) Set(name, value string) error {
+ flag, ok := fs.formal[name]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if err := flag.Value.Set(value); err != nil {
+ return err
+ }
+ if fs.actual == nil {
+ fs.actual = make(map[string]*Flag)
+ }
+ fs.actual[name] = flag
+ return nil
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (fs *FlagSet) PrintDefaults() {
+ writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0)
+ home := homedir.Get()
+
+ // Don't substitute when HOME is /
+ if runtime.GOOS != "windows" && home == "/" {
+ home = ""
+ }
+
+ // Add a blank line between cmd description and list of options
+ if fs.FlagCount() > 0 {
+ fmt.Fprintln(writer, "")
+ }
+
+ fs.VisitAll(func(flag *Flag) {
+ format := " -%s=%s"
+ names := []string{}
+ for _, name := range flag.Names {
+ if name[0] != '#' {
+ names = append(names, name)
+ }
+ }
+ if len(names) > 0 && len(flag.Usage) > 0 {
+ val := flag.DefValue
+
+ if home != "" && strings.HasPrefix(val, home) {
+ val = homedir.GetShortcutString() + val[len(home):]
+ }
+
+ fmt.Fprintf(writer, format, strings.Join(names, ", -"), val)
+ for i, line := range strings.Split(flag.Usage, "\n") {
+ if i != 0 {
+ line = " " + line
+ }
+ fmt.Fprintln(writer, "\t", line)
+ }
+ }
+ })
+ writer.Flush()
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(fs *FlagSet) {
+ if fs.name == "" {
+ fmt.Fprintf(fs.Out(), "Usage:\n")
+ } else {
+ fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name)
+ }
+ fs.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+var Usage = func() {
+ fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// ShortUsage prints to standard error a usage message documenting the standard command layout.
+// The function is a variable that may be changed to point to a custom function.
+var ShortUsage = func() {
+ fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0])
+}
+
+// FlagCount returns the number of flags that have been defined.
+func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) }
+
+// FlagCountUndeprecated returns the number of undeprecated flags that have been defined.
+func (fs *FlagSet) FlagCountUndeprecated() int {
+ count := 0
+ for _, flag := range sortFlags(fs.formal) {
+ for _, name := range flag.Names {
+ if name[0] != '#' {
+ count++
+ break
+ }
+ }
+ }
+ return count
+}
+
+// NFlag returns the number of flags that have been set.
+func (fs *FlagSet) NFlag() int { return len(fs.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (fs *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(fs.args) {
+ return ""
+ }
+ return fs.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (fs *FlagSet) NArg() int { return len(fs.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (fs *FlagSet) Args() []string { return fs.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) {
+ fs.Var(newBoolValue(value, p), names, usage)
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, names []string, value bool, usage string) {
+ CommandLine.Var(newBoolValue(value, p), names, usage)
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool {
+ p := new(bool)
+ fs.BoolVar(p, names, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(names []string, value bool, usage string) *bool {
+ return CommandLine.Bool(names, value, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) {
+ fs.Var(newIntValue(value, p), names, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, names []string, value int, usage string) {
+ CommandLine.Var(newIntValue(value, p), names, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (fs *FlagSet) Int(names []string, value int, usage string) *int {
+ p := new(int)
+ fs.IntVar(p, names, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(names []string, value int, usage string) *int {
+ return CommandLine.Int(names, value, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) {
+ fs.Var(newInt64Value(value, p), names, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, names []string, value int64, usage string) {
+ CommandLine.Var(newInt64Value(value, p), names, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 {
+ p := new(int64)
+ fs.Int64Var(p, names, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(names []string, value int64, usage string) *int64 {
+ return CommandLine.Int64(names, value, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) {
+ fs.Var(newUintValue(value, p), names, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, names []string, value uint, usage string) {
+ CommandLine.Var(newUintValue(value, p), names, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint {
+ p := new(uint)
+ fs.UintVar(p, names, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(names []string, value uint, usage string) *uint {
+ return CommandLine.Uint(names, value, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) {
+ fs.Var(newUint64Value(value, p), names, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, names []string, value uint64, usage string) {
+ CommandLine.Var(newUint64Value(value, p), names, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ fs.Uint64Var(p, names, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(names []string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64(names, value, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) {
+ fs.Var(newStringValue(value, p), names, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, names []string, value string, usage string) {
+ CommandLine.Var(newStringValue(value, p), names, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (fs *FlagSet) String(names []string, value string, usage string) *string {
+ p := new(string)
+ fs.StringVar(p, names, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(names []string, value string, usage string) *string {
+ return CommandLine.String(names, value, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) {
+ fs.Var(newFloat64Value(value, p), names, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, names []string, value float64, usage string) {
+ CommandLine.Var(newFloat64Value(value, p), names, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 {
+ p := new(float64)
+ fs.Float64Var(p, names, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(names []string, value float64, usage string) *float64 {
+ return CommandLine.Float64(names, value, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) {
+ fs.Var(newDurationValue(value, p), names, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) {
+ CommandLine.Var(newDurationValue(value, p), names, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ fs.DurationVar(p, names, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(names []string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.Duration(names, value, usage)
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (fs *FlagSet) Var(value Value, names []string, usage string) {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{names, usage, value, value.String()}
+ for _, name := range names {
+ name = strings.TrimPrefix(name, "#")
+ _, alreadythere := fs.formal[name]
+ if alreadythere {
+ var msg string
+ if fs.name == "" {
+ msg = fmt.Sprintf("flag redefined: %s", name)
+ } else {
+ msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name)
+ }
+ fmt.Fprintln(fs.Out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if fs.formal == nil {
+ fs.formal = make(map[string]*Flag)
+ }
+ fs.formal[name] = flag
+ }
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, names []string, usage string) {
+ CommandLine.Var(value, names, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (fs *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ fmt.Fprintln(fs.Out(), err)
+ if os.Args[0] == fs.name {
+ fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0])
+ } else {
+ fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name)
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (fs *FlagSet) usage() {
+ if fs == CommandLine {
+ Usage()
+ } else if fs.Usage == nil {
+ defaultUsage(fs)
+ } else {
+ fs.Usage()
+ }
+}
+
+func trimQuotes(str string) string {
+ if len(str) == 0 {
+ return str
+ }
+ type quote struct {
+ start, end byte
+ }
+
+ // All valid quote types.
+ quotes := []quote{
+ // Double quotes
+ {
+ start: '"',
+ end: '"',
+ },
+
+ // Single quotes
+ {
+ start: '\'',
+ end: '\'',
+ },
+ }
+
+ for _, quote := range quotes {
+ // Only strip if outermost match.
+ if str[0] == quote.start && str[len(str)-1] == quote.end {
+ str = str[1 : len(str)-1]
+ break
+ }
+ }
+
+ return str
+}
+
+// parseOne parses one flag. It reports whether a flag was seen.
+func (fs *FlagSet) parseOne() (bool, string, error) {
+ if len(fs.args) == 0 {
+ return false, "", nil
+ }
+ s := fs.args[0]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ return false, "", nil
+ }
+ if s[1] == '-' && len(s) == 2 { // "--" terminates the flags
+ fs.args = fs.args[1:]
+ return false, "", nil
+ }
+ name := s[1:]
+ if len(name) == 0 || name[0] == '=' {
+ return false, "", fs.failf("bad flag syntax: %s", s)
+ }
+
+ // it's a flag. does it have an argument?
+ fs.args = fs.args[1:]
+ hasValue := false
+ value := ""
+ if i := strings.Index(name, "="); i != -1 {
+ value = trimQuotes(name[i+1:])
+ hasValue = true
+ name = name[:i]
+ }
+
+ m := fs.formal
+ flag, alreadythere := m[name] // BUG
+ if !alreadythere {
+ if name == "-help" || name == "help" || name == "h" { // special case for nice help message.
+ fs.usage()
+ return false, "", ErrHelp
+ }
+ if len(name) > 0 && name[0] == '-' {
+ return false, "", fs.failf("flag provided but not defined: -%s", name)
+ }
+ return false, name, ErrRetry
+ }
+ if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
+ if hasValue {
+ if err := fv.Set(value); err != nil {
+ return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err)
+ }
+ } else {
+ fv.Set("true")
+ }
+ } else {
+ // It must have a value, which might be the next argument.
+ if !hasValue && len(fs.args) > 0 {
+ // value is the next arg
+ hasValue = true
+ value, fs.args = fs.args[0], fs.args[1:]
+ }
+ if !hasValue {
+ return false, "", fs.failf("flag needs an argument: -%s", name)
+ }
+ if err := flag.Value.Set(value); err != nil {
+ return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err)
+ }
+ }
+ if fs.actual == nil {
+ fs.actual = make(map[string]*Flag)
+ }
+ fs.actual[name] = flag
+ for i, n := range flag.Names {
+ if n == fmt.Sprintf("#%s", name) {
+ replacement := ""
+ for j := i; j < len(flag.Names); j++ {
+ if flag.Names[j][0] != '#' {
+ replacement = flag.Names[j]
+ break
+ }
+ }
+ if replacement != "" {
+ fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement)
+ } else {
+ fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name)
+ }
+ }
+ }
+ return true, "", nil
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (fs *FlagSet) Parse(arguments []string) error {
+ fs.parsed = true
+ fs.args = arguments
+ for {
+ seen, name, err := fs.parseOne()
+ if seen {
+ continue
+ }
+ if err == nil {
+ break
+ }
+ if err == ErrRetry {
+ if len(name) > 1 {
+ err = nil
+ for _, letter := range strings.Split(name, "") {
+ fs.args = append([]string{"-" + letter}, fs.args...)
+ seen2, _, err2 := fs.parseOne()
+ if seen2 {
+ continue
+ }
+ if err2 != nil {
+ err = fs.failf("flag provided but not defined: -%s", name)
+ break
+ }
+ }
+ if err == nil {
+ continue
+ }
+ } else {
+ err = fs.failf("flag provided but not defined: -%s", name)
+ }
+ }
+ switch fs.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// ParseFlags is a utility function that adds a help flag if withHelp is true,
+// calls fs.Parse(args), and prints a relevant error message if there is an
+// incorrect number of arguments. It returns an error only if error handling is
+// set to ContinueOnError and parsing fails. If error handling is set to
+// ExitOnError, it's safe to ignore the return value.
+func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error {
+ var help *bool
+ if withHelp {
+ help = fs.Bool([]string{"#help", "-help"}, false, "Print usage")
+ }
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+ if help != nil && *help {
+ fs.SetOutput(os.Stdout)
+ fs.Usage()
+ os.Exit(0)
+ }
+ if str := fs.CheckArgs(); str != "" {
+ fs.SetOutput(os.Stderr)
+ fs.ReportError(str, withHelp)
+ fs.ShortUsage()
+ os.Exit(1)
+ }
+ return nil
+}
+
+// ReportError is a utility method that prints a user-friendly message
+// containing the error that occurred during parsing and a suggestion to get help
+func (fs *FlagSet) ReportError(str string, withHelp bool) {
+ if withHelp {
+ if os.Args[0] == fs.Name() {
+ str += ".\nSee '" + os.Args[0] + " --help'"
+ } else {
+ str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
+ }
+ }
+ fmt.Fprintf(fs.Out(), "docker: %s.\n", str)
+}
+
+// Parsed reports whether fs.Parse has been called.
+func (fs *FlagSet) Parsed() bool {
+ return fs.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
+// methods of CommandLine.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ }
+ return f
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ fs.name = name
+ fs.errorHandling = errorHandling
+}
+
+type mergeVal struct {
+ Value
+ key string
+ fset *FlagSet
+}
+
+func (v mergeVal) Set(s string) error {
+ return v.fset.Set(v.key, s)
+}
+
+func (v mergeVal) IsBoolFlag() bool {
+ if b, ok := v.Value.(boolFlag); ok {
+ return b.IsBoolFlag()
+ }
+ return false
+}
+
+// Merge is a helper function that merges n FlagSets into a single dest FlagSet.
+// In case of a name collision between the flagsets, it applies the
+// destination FlagSet's errorHandling behaviour.
+func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
+ for _, fset := range flagsets {
+ for k, f := range fset.formal {
+ if _, ok := dest.formal[k]; ok {
+ var err error
+ if fset.name == "" {
+ err = fmt.Errorf("flag redefined: %s", k)
+ } else {
+ err = fmt.Errorf("%s flag redefined: %s", fset.name, k)
+ }
+ fmt.Fprintln(fset.Out(), err.Error())
+ // Happens only if flags are declared with identical names
+ switch dest.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ newF := *f
+ newF.Value = mergeVal{f.Value, k, fset}
+ dest.formal[k] = &newF
+ }
+ }
+ return nil
+}
+
+// IsEmpty reports whether the FlagSet is actually empty.
+func (fs *FlagSet) IsEmpty() bool {
+ return len(fs.actual) == 0
+}
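To make the deprecation behaviour described in the package comment concrete, a minimal sketch (the flag names are invented for illustration): parsing the hidden `#oldflag` name still works, but emits the replacement warning on stderr.

```go
package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	// "#oldflag" is deprecated and hidden from usage; "-newflag" replaces it.
	val := fs.String([]string{"#oldflag", "-newflag"}, "", "a simple string")
	if err := fs.Parse([]string{"-oldflag", "str"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*val) // str, after a deprecation warning on stderr
}
```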
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go
new file mode 100644
index 0000000..e326a11
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go
@@ -0,0 +1,187 @@
+// Package parsers provides helper functions to parse and validate different
+// types of string: hosts, unix addresses, tcp addresses, filters, and
+// kernel/operating system versions.
+package parsers
+
+import (
+ "fmt"
+ "net/url"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// ParseHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, it will use defaultTCPAddr or defaultUnixAddr.
+// FIXME: Change this not to receive default value as parameter
+func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
+ addr = strings.TrimSpace(addr)
+ if addr == "" {
+ if runtime.GOOS != "windows" {
+ addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
+ } else {
+ // Note - defaultTCPAddr already includes tcp:// prefix
+ addr = defaultTCPAddr
+ }
+ }
+ addrParts := strings.Split(addr, "://")
+ if len(addrParts) == 1 {
+ addrParts = []string{"tcp", addrParts[0]}
+ }
+
+ switch addrParts[0] {
+ case "tcp":
+ return ParseTCPAddr(addrParts[1], defaultTCPAddr)
+ case "unix":
+ return ParseUnixAddr(addrParts[1], defaultUnixAddr)
+ case "fd":
+ return addr, nil
+ default:
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+}
+
+// ParseUnixAddr parses and validates that the specified address is a valid UNIX
+// socket address. It returns a formatted UNIX socket address, either using the
+// address parsed from addr, or the contents of defaultAddr if addr is a blank
+// string.
+func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, "unix://")
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
+ }
+ if addr == "" {
+ addr = defaultAddr
+ }
+ return fmt.Sprintf("unix://%s", addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from addr, or the contents of defaultAddr if addr is a blank string.
+func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, "tcp://")
+ if strings.Contains(addr, "://") || addr == "" {
+ return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr)
+ }
+
+ u, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", err
+ }
+ hostParts := strings.Split(u.Host, ":")
+ if len(hostParts) != 2 {
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+ host := hostParts[0]
+ if host == "" {
+ host = defaultAddr
+ }
+
+ p, err := strconv.Atoi(hostParts[1])
+ if err != nil && p == 0 {
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+ return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil
+}
+
+// ParseRepositoryTag gets a repository name and returns the right reposName + tag|digest
+// The tag can be confusing because of a port in a repository name.
+// Ex: localhost.localdomain:5000/samalba/hipache:latest
+// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
+func ParseRepositoryTag(repos string) (string, string) {
+ n := strings.Index(repos, "@")
+ if n >= 0 {
+ parts := strings.Split(repos, "@")
+ return parts[0], parts[1]
+ }
+ n = strings.LastIndex(repos, ":")
+ if n < 0 {
+ return repos, ""
+ }
+ if tag := repos[n+1:]; !strings.Contains(tag, "/") {
+ return repos[:n], tag
+ }
+ return repos, ""
+}
+
+// PartParser parses and validates the specified string (data) using the specified template
+// e.g. ip:public:private -> 192.168.0.1:80:8000
+func PartParser(template, data string) (map[string]string, error) {
+ // ip:public:private
+ var (
+ templateParts = strings.Split(template, ":")
+ parts = strings.Split(data, ":")
+ out = make(map[string]string, len(templateParts))
+ )
+ if len(parts) != len(templateParts) {
+ return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
+ }
+
+ for i, t := range templateParts {
+ value := ""
+ if len(parts) > i {
+ value = parts[i]
+ }
+ out[t] = value
+ }
+ return out, nil
+}
+
+// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
+func ParseKeyValueOpt(opt string) (string, string, error) {
+ parts := strings.SplitN(opt, "=", 2)
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+ }
+ return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+ if ports == "" {
+ return 0, 0, fmt.Errorf("Empty string specified for ports.")
+ }
+ if !strings.Contains(ports, "-") {
+ start, err := strconv.ParseUint(ports, 10, 16)
+ end := start
+ return start, end, err
+ }
+
+ parts := strings.Split(ports, "-")
+ start, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ end, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ if end < start {
+ return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+ }
+ return start, end, nil
+}
+
+// ParseLink parses and validates the specified string as a link format (name:alias)
+func ParseLink(val string) (string, string, error) {
+ if val == "" {
+ return "", "", fmt.Errorf("empty string specified for links")
+ }
+ arr := strings.Split(val, ":")
+ if len(arr) > 2 {
+ return "", "", fmt.Errorf("bad format for links: %s", val)
+ }
+ if len(arr) == 1 {
+ return val, val, nil
+ }
+ // This is kept because we can actually get an HostConfig with links
+ // from an already created container and the format is not `foo:bar`
+ // but `/foo:/c1/bar`
+ if strings.HasPrefix(arr[0], "/") {
+ _, alias := path.Split(arr[1])
+ return arr[0][1:], alias, nil
+ }
+ return arr[0], arr[1], nil
+}
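A quick sketch exercising two of the parsers above, reusing the inputs from their own doc comments:

```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
)

func main() {
	// The tag separator is the last ':' whose suffix contains no '/',
	// so a registry port is not mistaken for a tag.
	repo, tag := parsers.ParseRepositoryTag("localhost.localdomain:5000/samalba/hipache:latest")
	fmt.Println(repo, tag) // localhost.localdomain:5000/samalba/hipache latest

	start, end, err := parsers.ParsePortRange("8000-9000")
	if err != nil {
		panic(err)
	}
	fmt.Println(start, end) // 8000 9000
}
```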
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 0000000..515fb4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,119 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool *BufioReaderPool
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+func init() {
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ }
+ return &BufioReaderPool{pool: pool}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := BufioReader32KPool.Get(src)
+ written, err = io.Copy(dst, buf)
+ BufioReader32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ }
+ return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
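
A hedged sketch of the pooled copy in action, assuming the vendored import path below; pools.Copy borrows one 32K bufio.Reader per call and returns it to the pool afterwards:

    package main

    import (
        "bytes"
        "fmt"
        "strings"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
    )

    func main() {
        var dst bytes.Buffer
        // Copy wraps src in a pooled reader, so repeated copies reuse one 32K buffer.
        n, err := pools.Copy(&dst, strings.NewReader("hello"))
        fmt.Println(n, err, dst.String()) // 5 <nil> hello
    }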
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go
new file mode 100644
index 0000000..dd52b90
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it calls a function in a goroutine,
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error, 1)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
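
A small sketch, assuming the vendored import path; the size-1 channel buffer is what lets the goroutine finish even if the caller collects the result late:

    package main

    import (
        "errors"
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
    )

    func main() {
        errCh := promise.Go(func() error { return errors.New("task failed") })
        // Other work can happen here; the buffered channel means the goroutine
        // never blocks on send, even if the result is received late.
        fmt.Println(<-errCh) // task failed
    }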
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 0000000..63b3df7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,168 @@
+package stdcopy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+)
+
+const (
+ StdWriterPrefixLen = 8
+ StdWriterFdIndex = 0
+ StdWriterSizeIndex = 4
+)
+
+type StdType [StdWriterPrefixLen]byte
+
+var (
+ Stdin StdType = StdType{0: 0}
+ Stdout StdType = StdType{0: 1}
+ Stderr StdType = StdType{0: 2}
+)
+
+type StdWriter struct {
+ io.Writer
+ prefix StdType
+ sizeBuf []byte
+}
+
+func (w *StdWriter) Write(buf []byte) (n int, err error) {
+ var n1, n2 int
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instantiated")
+ }
+ binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
+ n1, err = w.Writer.Write(w.prefix[:])
+ if err != nil {
+ n = n1 - StdWriterPrefixLen
+ } else {
+ n2, err = w.Writer.Write(buf)
+ n = n1 + n2 - StdWriterPrefixLen
+ }
+ if n < 0 {
+ n = 0
+ }
+ return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
+func NewStdWriter(w io.Writer, t StdType) *StdWriter {
+ return &StdWriter{
+ Writer: w,
+ prefix: t,
+ sizeBuf: make([]byte, 4),
+ }
+}
+
+var ErrInvalidStdHeader = errors.New("Unrecognized input header")
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, 32*1024+StdWriterPrefixLen+1)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < StdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < StdWriterPrefixLen {
+ logrus.Debugf("Corrupted prefix: %v", buf[:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading header: %s", er)
+ return 0, er
+ }
+ }
+
+ // Check the first byte to know where to write
+ switch buf[StdWriterFdIndex] {
+ case 0:
+ fallthrough
+ case 1:
+ // Write on stdout
+ out = dstout
+ case 2:
+ // Write on stderr
+ out = dsterr
+ default:
+ logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
+ return 0, ErrInvalidStdHeader
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
+ logrus.Debugf("framesize: %d", frameSize)
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+StdWriterPrefixLen > bufLen {
+ logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
+ buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+StdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < frameSize+StdWriterPrefixLen {
+ logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading frame: %s", er)
+ return 0, er
+ }
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
+ if ew != nil {
+ logrus.Debugf("Error writing frame: %s", ew)
+ return 0, ew
+ }
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+ return 0, io.ErrShortWrite
+ }
+ written += int64(nw)
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+StdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + StdWriterPrefixLen
+ }
+}
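
To make the framing concrete, a sketch that muxes two streams with NewStdWriter and demuxes them again with StdCopy (vendored import path assumed):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
    )

    func main() {
        // Mux two logical streams into one buffer; each frame is tagged with its fd.
        var muxed bytes.Buffer
        fmt.Fprint(stdcopy.NewStdWriter(&muxed, stdcopy.Stdout), "to stdout\n")
        fmt.Fprint(stdcopy.NewStdWriter(&muxed, stdcopy.Stderr), "to stderr\n")

        // Demux: StdCopy routes each frame to dstout or dsterr by its header byte.
        var stdout, stderr bytes.Buffer
        if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
            panic(err)
        }
        fmt.Print(stdout.String(), stderr.String())
    }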
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 0000000..6304518
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,9 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 0000000..23f7c61
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,83 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ EVENT_ALL_ACCESS = 0x1F0003
+ EVENT_MODIFY_STATUS = 0x0002
+)
+
+var (
+ procCreateEvent = modkernel32.NewProc("CreateEventW")
+ procOpenEvent = modkernel32.NewProc("OpenEventW")
+ procSetEvent = modkernel32.NewProc("SetEvent")
+ procResetEvent = modkernel32.NewProc("ResetEvent")
+ procPulseEvent = modkernel32.NewProc("PulseEvent")
+)
+
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32 = 0
+ if manualReset {
+ _p1 = 1
+ }
+ var _p2 uint32 = 0
+ if initialState {
+ _p2 = 1
+ }
+ r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32 = 0
+ if inheritHandle {
+ _p1 = 1
+ }
+ r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+func SetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procSetEvent)
+}
+
+func ResetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procResetEvent)
+}
+
+func PulseEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
+ r0, _, _ := proc.Call(uintptr(handle))
+ if r0 != 0 {
+ err = syscall.Errno(r0)
+ }
+ return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive so that the GC does not free it while it is still needed
+func use(p unsafe.Pointer) {
+ temp = p
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 0000000..e1f70e8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+)
+
+func MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 0000000..90b5006
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+)
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, perm os.FileMode) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = MkdirAll(path[0:j-1], perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = os.Mkdir(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 0000000..d0e43b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Returns an error if the file does not exist
+func Lstat(path string) (*Stat_t, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 0000000..eee1be2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,29 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+)
+
+// Some explanation for my own sanity, and hopefully maintainers in the
+// future.
+//
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS-specific module.
+
+func Lstat(path string) (*Stat_t, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Stat_t{
+ name: fi.Name(),
+ size: fi.Size(),
+ mode: fi.Mode(),
+ modTime: fi.ModTime(),
+ isDir: fi.IsDir()}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 0000000..3b6e947
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 0000000..41f2bab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,71 @@
+package system
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units"
+)
+
+var (
+ ErrMalformed = errors.New("malformed file")
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given a io.Reader to the file.
+//
+// Returns an error if there are problems reading from the file
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
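
A Linux-only sketch, assuming the vendored import path; all fields come back converted from kB to bytes:

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
    )

    func main() {
        // Linux-only: values are parsed out of /proc/meminfo and returned in bytes.
        mi, err := system.ReadMemInfo()
        if err != nil {
            panic(err)
        }
        fmt.Printf("RAM %d/%d free, swap %d/%d free\n",
            mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
    }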
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 0000000..604d338
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux,!windows
+
+package system
+
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 0000000..d466425
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,44 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
+ dwLength: 64,
+ }
+ r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+ if r1 == 0 {
+ return &MemInfo{}, nil
+ }
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 0000000..26617eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,20 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev
+func Mknod(path string, mode uint32, dev int) error {
+ return syscall.Mknod(path, mode, dev)
+}
+
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
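
A worked example of the bit layout described above (vendored import path assumed); block device major 8, minor 1 is the conventional /dev/sda1:

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
    )

    func main() {
        // Low 8 bits hold the minor's low byte, the next 12 bits hold the major.
        fmt.Printf("%#x\n", system.Mkdev(8, 1)) // 0x801
    }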
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 0000000..1811542
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,11 @@
+// +build windows
+
+package system
+
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 0000000..e2ecfe5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,46 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Stat_t type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file
+type Stat_t struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+func (s Stat_t) Mode() uint32 {
+ return s.mode
+}
+
+func (s Stat_t) Uid() uint32 {
+ return s.uid
+}
+
+func (s Stat_t) Gid() uint32 {
+ return s.gid
+}
+
+func (s Stat_t) Rdev() uint64 {
+ return s.rdev
+}
+
+func (s Stat_t) Size() int64 {
+ return s.size
+}
+
+func (s Stat_t) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+func (s Stat_t) GetLastModification() syscall.Timespec {
+ return s.Mtim()
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 0000000..4b2198b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+ return &Stat_t{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
+
+// Stat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Returns an error if the file does not exist
+func Stat(path string) (*Stat_t, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 0000000..80262d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+ return &Stat_t{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on linux, and loads a system.Stat_t from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Returns an error if the file does not exist
+func Stat(path string) (*Stat_t, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 0000000..5251ae2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.Stat_t type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+ return &Stat_t{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 0000000..b1fd39e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "time"
+)
+
+type Stat_t struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ isDir bool
+}
+
+func (s Stat_t) Name() string {
+ return s.name
+}
+
+func (s Stat_t) Size() int64 {
+ return s.size
+}
+
+func (s Stat_t) Mode() os.FileMode {
+ return s.mode
+}
+
+func (s Stat_t) ModTime() time.Time {
+ return s.modTime
+}
+
+func (s Stat_t) IsDir() bool {
+ return s.isDir
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 0000000..fddbecd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+func Umask(newmask int) (oldmask int, err error) {
+ return syscall.Umask(newmask), nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 0000000..3be563f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package system
+
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go
new file mode 100644
index 0000000..4c6002f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -0,0 +1,11 @@
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000..ceaa044
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 0000000..8f90298
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,28 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ // These are not currently available in syscall
+ AT_FDCWD := -100
+ AT_SYMLINK_NOFOLLOW := 0x100
+
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return syscall.UtimesNano(path, ts)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000..adf2734
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
+
+func UtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000..00edb20
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,59 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Returns a nil slice and nil error if the xattr is not set
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ dest := make([]byte, 128)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno == syscall.ENODATA {
+ return nil, nil
+ }
+ if errno == syscall.ERANGE {
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ }
+ if errno != 0 {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+var _zero uintptr
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
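
A Linux-only sketch, assuming /tmp/f exists on a filesystem with user xattrs enabled and the vendored import path below:

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
    )

    func main() {
        if err := system.Lsetxattr("/tmp/f", "user.note", []byte("hi"), 0); err != nil {
            panic(err)
        }
        val, err := system.Lgetxattr("/tmp/f", "user.note")
        fmt.Printf("%s %v\n", val, err) // hi <nil>

        // An unset attribute comes back as a nil slice with a nil error.
        missing, err := system.Lgetxattr("/tmp/f", "user.other")
        fmt.Println(missing == nil, err) // true <nil>
    }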
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000..0060c16
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package system
+
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go
new file mode 100644
index 0000000..8fb0d80
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go
@@ -0,0 +1,111 @@
+// Package ulimit provides structure and helper function to parse and represent
+// resource limits (Rlimit and Ulimit, its human friendly version).
+package ulimit
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // magic numbers for making the syscall
+ // some of these are defined in the syscall package, but not all.
+ // Also, since the Windows client does not have access to the syscall
+ // package, they need to be defined here.
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// Parse parses and returns a Ulimit from the specified string.
+func Parse(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ limitVals := strings.SplitN(parts[1], ":", 2)
+ if len(limitVals) > 2 {
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ soft, err := strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ hard := soft // in case no hard was set
+ if len(limitVals) == 2 {
+ hard, err = strconv.ParseInt(limitVals[1], 10, 64)
+ }
+ if soft > hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil
+}
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
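
A sketch of the soft[:hard] syntax round-tripping through Parse and GetRlimit (vendored import path assumed):

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
    )

    func main() {
        // "name=soft[:hard]"; omitting ":hard" copies the soft limit into hard.
        u, err := ulimit.Parse("nofile=1024:2048")
        if err != nil {
            panic(err)
        }
        fmt.Println(u) // nofile=1024:2048

        r, _ := u.GetRlimit()
        fmt.Println(r.Type, r.Soft, r.Hard) // 7 1024 2048
    }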
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go
new file mode 100644
index 0000000..c219a8a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper function to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours ago", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
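
A few sample outputs, assuming the vendored import path; note the cutovers at one minute, one hour, and 48 hours:

    package main

    import (
        "fmt"
        "time"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units"
    )

    func main() {
        fmt.Println(units.HumanDuration(47 * time.Second)) // 47 seconds
        fmt.Println(units.HumanDuration(90 * time.Second)) // About a minute
        fmt.Println(units.HumanDuration(72 * time.Hour))   // 3 days
    }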
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go
new file mode 100644
index 0000000..2fde3b4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// using SI standard (eg. "44kB", "17MB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44KiB", "17MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 3 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseInt(matches[1], 10, 0)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[2])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= mul
+ }
+
+ return size, nil
+}
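
A sketch contrasting the decimal and binary paths (vendored import path assumed):

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units"
    )

    func main() {
        fmt.Println(units.HumanSize(1500000)) // 1.5 MB (decimal, base 1000)
        fmt.Println(units.BytesSize(1536))    // 1.5 KiB (binary, base 1024)

        n, err := units.RAMInBytes("32g") // binary units: 32 * 1024^3
        fmt.Println(n, err)               // 34359738368 <nil>
    }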
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go
new file mode 100644
index 0000000..19c9d77
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go
@@ -0,0 +1,61 @@
+package volume
+
+// DefaultDriverName is the driver name used for the driver
+// implemented in the local package.
+const DefaultDriverName string = "local"
+
+// Driver is for creating and removing volumes.
+type Driver interface {
+ // Name returns the name of the volume driver.
+ Name() string
+ // Create makes a new volume with the given id.
+ Create(string) (Volume, error)
+ // Remove deletes the volume.
+ Remove(Volume) error
+}
+
+// Volume is a place to store data. It is backed by a specific driver, and can be mounted.
+type Volume interface {
+ // Name returns the name of the volume
+ Name() string
+ // DriverName returns the name of the driver which owns this volume.
+ DriverName() string
+ // Path returns the absolute path to the volume.
+ Path() string
+ // Mount mounts the volume and returns the absolute path to
+ // where it can be consumed.
+ Mount() (string, error)
+ // Unmount unmounts the volume when it is no longer in use.
+ Unmount() error
+}
+
+// read-write modes
+var rwModes = map[string]bool{
+ "rw": true,
+ "rw,Z": true,
+ "rw,z": true,
+ "z,rw": true,
+ "Z,rw": true,
+ "Z": true,
+ "z": true,
+}
+
+// read-only modes
+var roModes = map[string]bool{
+ "ro": true,
+ "ro,Z": true,
+ "ro,z": true,
+ "z,ro": true,
+ "Z,ro": true,
+}
+
+// ValidateMountMode will make sure the mount mode is valid. It returns
+// whether the mode is a valid mount mode and whether it is read-write.
+func ValidateMountMode(mode string) (bool, bool) {
+ return roModes[mode] || rwModes[mode], rwModes[mode]
+}
+
+// ReadWrite tells you if a mode string is a valid read-write mode or not.
+func ReadWrite(mode string) bool {
+ return rwModes[mode]
+}
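
A sketch of the mode validation (vendored import path assumed); the second result only reports read-write when the mode is in rwModes:

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"
    )

    func main() {
        valid, writable := volume.ValidateMountMode("ro,Z")
        fmt.Println(valid, writable) // true false

        valid, writable = volume.ValidateMountMode("bogus")
        fmt.Println(valid, writable) // false false
    }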
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS
new file mode 100644
index 0000000..edbe200
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS
@@ -0,0 +1,2 @@
+Tianon Gravi (@tianon)
+Aleksa Sarai (@cyphar)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go
new file mode 100644
index 0000000..6f8a982
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go
@@ -0,0 +1,108 @@
+package user
+
+import (
+ "errors"
+ "fmt"
+ "syscall"
+)
+
+var (
+ // The current operating system does not provide the required data for user lookups.
+ ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
+)
+
+func lookupUser(filter func(u User) bool) (User, error) {
+ // Get operating system-specific passwd reader-closer.
+ passwd, err := GetPasswd()
+ if err != nil {
+ return User{}, err
+ }
+ defer passwd.Close()
+
+ // Get the users.
+ users, err := ParsePasswdFilter(passwd, filter)
+ if err != nil {
+ return User{}, err
+ }
+
+ // No user entries found.
+ if len(users) == 0 {
+ return User{}, fmt.Errorf("no matching entries in passwd file")
+ }
+
+ // Assume the first entry is the "correct" one.
+ return users[0], nil
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+ return LookupUid(syscall.Getuid())
+}
+
+// LookupUser looks up a user by their username in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUser returns an error.
+func LookupUser(username string) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Name == username
+ })
+}
+
+// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
+// be found (or there is no /etc/passwd file on the filesystem), then LookupId
+// returns an error.
+func LookupUid(uid int) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Uid == uid
+ })
+}
+
+func lookupGroup(filter func(g Group) bool) (Group, error) {
+ // Get operating system-specific group reader-closer.
+ group, err := GetGroup()
+ if err != nil {
+ return Group{}, err
+ }
+ defer group.Close()
+
+ // Get the users.
+ groups, err := ParseGroupFilter(group, filter)
+ if err != nil {
+ return Group{}, err
+ }
+
+ // No user entries found.
+ if len(groups) == 0 {
+ return Group{}, fmt.Errorf("no matching entries in group file")
+ }
+
+ // Assume the first entry is the "correct" one.
+ return groups[0], nil
+}
+
+// CurrentGroup looks up the current user's group by their primary group id's
+// entry in /etc/passwd. If the group cannot be found (or there is no
+// /etc/group file on the filesystem), then CurrentGroup returns an error.
+func CurrentGroup() (Group, error) {
+ return LookupGid(syscall.Getgid())
+}
+
+// LookupGroup looks up a group by its name in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGroup
+// returns an error.
+func LookupGroup(groupname string) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Name == groupname
+ })
+}
+
+// LookupGid looks up a group by its group id in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGid
+// returns an error.
+func LookupGid(gid int) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Gid == gid
+ })
+}
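
A Unix-only sketch, assuming the vendored import path; lookups scan /etc/passwd and take the first matching entry:

    package main

    import (
        "fmt"

        "github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user"
    )

    func main() {
        u, err := user.LookupUser("root")
        if err != nil {
            panic(err)
        }
        fmt.Println(u.Uid, u.Gid, u.Home) // 0 0 /root
    }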
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go
new file mode 100644
index 0000000..758b734
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go
@@ -0,0 +1,30 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package user
+
+import (
+ "io"
+ "os"
+)
+
+// Unix-specific path to the passwd and group formatted files.
+const (
+ unixPasswdPath = "/etc/passwd"
+ unixGroupPath = "/etc/group"
+)
+
+func GetPasswdPath() (string, error) {
+ return unixPasswdPath, nil
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return os.Open(unixPasswdPath)
+}
+
+func GetGroupPath() (string, error) {
+ return unixGroupPath, nil
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return os.Open(unixGroupPath)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go
new file mode 100644
index 0000000..7217948
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go
@@ -0,0 +1,21 @@
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package user
+
+import "io"
+
+func GetPasswdPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
+
+func GetGroupPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go
new file mode 100644
index 0000000..13226db
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go
@@ -0,0 +1,407 @@
+package user
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ minId = 0
+ maxId = 1<<31 - 1 // for compatibility with 32-bit systems
+)
+
+var (
+ ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId)
+)
+
+type User struct {
+ Name string
+ Pass string
+ Uid int
+ Gid int
+ Gecos string
+ Home string
+ Shell string
+}
+
+type Group struct {
+ Name string
+ Pass string
+ Gid int
+ List []string
+}
+
+func parseLine(line string, v ...interface{}) {
+ if line == "" {
+ return
+ }
+
+ parts := strings.Split(line, ":")
+ for i, p := range parts {
+ if len(v) <= i {
+ // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files
+ break
+ }
+
+ switch e := v[i].(type) {
+ case *string:
+ // "root", "adm", "/bin/bash"
+ *e = p
+ case *int:
+ // "0", "4", "1000"
+ // ignore string to int conversion errors, for great "tolerance" of naughty configuration files
+ *e, _ = strconv.Atoi(p)
+ case *[]string:
+ // "", "root", "root,adm,daemon"
+ if p != "" {
+ *e = strings.Split(p, ",")
+ } else {
+ *e = []string{}
+ }
+ default:
+ // panic, because this is a programming/logic error, not a runtime one
+ panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!")
+ }
+ }
+}
+
+func ParsePasswdFile(path string) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswd(passwd)
+}
+
+func ParsePasswd(passwd io.Reader) ([]User, error) {
+ return ParsePasswdFilter(passwd, nil)
+}
+
+func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswdFilter(passwd, filter)
+}
+
+func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for passwd-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []User{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 passwd
+ // name:password:UID:GID:GECOS:directory:shell
+ // Name:Pass:Uid:Gid:Gecos:Home:Shell
+ // root:x:0:0:root:/root:/bin/bash
+ // adm:x:3:4:adm:/var/adm:/bin/false
+ p := User{}
+ parseLine(
+ text,
+ &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell,
+ )
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
+
+func ParseGroupFile(path string) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroup(group)
+}
+
+func ParseGroup(group io.Reader) ([]Group, error) {
+ return ParseGroupFilter(group, nil)
+}
+
+func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroupFilter(group, filter)
+}
+
+func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for group-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []Group{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 group
+ // group_name:password:GID:user_list
+ // Name:Pass:Gid:List
+ // root:x:0:root
+ // adm:x:4:root,adm,daemon
+ p := Group{}
+ parseLine(
+ text,
+ &p.Name, &p.Pass, &p.Gid, &p.List,
+ )
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
+
+type ExecUser struct {
+ Uid, Gid int
+ Sgids []int
+ Home string
+}
+
+// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
+// given file paths and uses that data as the arguments to GetExecUser. If the
+// files cannot be opened for any reason, the error is ignored and a nil
+// io.Reader is passed instead.
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
+ passwd, err := os.Open(passwdPath)
+ if err != nil {
+ passwd = nil
+ } else {
+ defer passwd.Close()
+ }
+
+ group, err := os.Open(groupPath)
+ if err != nil {
+ group = nil
+ } else {
+ defer group.Close()
+ }
+
+ return GetExecUser(userSpec, defaults, passwd, group)
+}
+
+// GetExecUser parses a user specification string (using the passwd and group
+// readers as sources for /etc/passwd and /etc/group data, respectively). In
+// the case of blank fields or missing data from the sources, the values in
+// defaults are used.
+//
+// GetExecUser will return an error if a user or group literal could not be
+// found in any entry in passwd and group respectively.
+//
+// Examples of valid user specifications are:
+// * ""
+// * "user"
+// * "uid"
+// * "user:group"
+// * "uid:gid
+// * "user:gid"
+// * "uid:group"
+func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
+ var (
+ userArg, groupArg string
+ name string
+ )
+
+ if defaults == nil {
+ defaults = new(ExecUser)
+ }
+
+ // Copy over defaults.
+ user := &ExecUser{
+ Uid: defaults.Uid,
+ Gid: defaults.Gid,
+ Sgids: defaults.Sgids,
+ Home: defaults.Home,
+ }
+
+ // Sgids slice *cannot* be nil.
+ if user.Sgids == nil {
+ user.Sgids = []int{}
+ }
+
+ // allow for userArg to have either "user" syntax, or optionally "user:group" syntax
+ parseLine(userSpec, &userArg, &groupArg)
+
+ users, err := ParsePasswdFilter(passwd, func(u User) bool {
+ if userArg == "" {
+ return u.Uid == user.Uid
+ }
+ return u.Name == userArg || strconv.Itoa(u.Uid) == userArg
+ })
+ if err != nil && passwd != nil {
+ if userArg == "" {
+ userArg = strconv.Itoa(user.Uid)
+ }
+ return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err)
+ }
+
+ haveUser := len(users) > 0
+ if haveUser {
+ // if we found any user entries that matched our filter, let's take the first one as "correct"
+ name = users[0].Name
+ user.Uid = users[0].Uid
+ user.Gid = users[0].Gid
+ user.Home = users[0].Home
+ } else if userArg != "" {
+ // we asked for a user but didn't find them... let's check to see if we wanted a numeric user
+ user.Uid, err = strconv.Atoi(userArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return nil, fmt.Errorf("Unable to find user %v", userArg)
+ }
+
+ // Must be inside valid uid range.
+ if user.Uid < minId || user.Uid > maxId {
+ return nil, ErrRange
+ }
+
+ // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
+ }
+
+ if groupArg != "" || name != "" {
+ groups, err := ParseGroupFilter(group, func(g Group) bool {
+ // Explicit group format takes precedence.
+ if groupArg != "" {
+ return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg
+ }
+
+ // Check if user is a member.
+ for _, u := range g.List {
+ if u == name {
+ return true
+ }
+ }
+
+ return false
+ })
+ if err != nil && group != nil {
+ return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
+ }
+
+ haveGroup := len(groups) > 0
+ if groupArg != "" {
+ if haveGroup {
+ // if we found any group entries that matched our filter, let's take the first one as "correct"
+ user.Gid = groups[0].Gid
+ } else {
+ // we asked for a group but didn't find it... let's check to see if we wanted a numeric group
+ user.Gid, err = strconv.Atoi(groupArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return nil, fmt.Errorf("Unable to find group %v", groupArg)
+ }
+
+ // Ensure gid is inside gid range.
+ if user.Gid < minId || user.Gid > maxId {
+ return nil, ErrRange
+ }
+
+ // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
+ }
+ } else if haveGroup {
+ // If implicit group format, fill supplementary gids.
+ user.Sgids = make([]int, len(groups))
+ for i, group := range groups {
+ user.Sgids[i] = group.Gid
+ }
+ }
+ }
+
+ return user, nil
+}
+
+// GetAdditionalGroupsPath looks up a list of groups by name or group id
+// against the group file. If a group name cannot be found, an error will be
+// returned. If a group id cannot be found, it will be returned as-is.
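+//
+// For example (a sketch; "adm" and "1001" are illustrative entries):
+//
+// gids, err := GetAdditionalGroupsPath([]string{"adm", "1001"}, "/etc/group")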
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
+ groupReader, err := os.Open(groupPath)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to open group file: %v", err)
+ }
+ defer groupReader.Close()
+
+ groups, err := ParseGroupFilter(groupReader, func(g Group) bool {
+ for _, ag := range additionalGroups {
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+ }
+
+ gidMap := make(map[int]struct{})
+ for _, ag := range additionalGroups {
+ var found bool
+ for _, g := range groups {
+ // if we found a matched group either by name or gid, take the
+ // first matched as correct
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ if _, ok := gidMap[g.Gid]; !ok {
+ gidMap[g.Gid] = struct{}{}
+ found = true
+ break
+ }
+ }
+ }
+ // we asked for a group but didn't find it. let's check to see
+ // if we wanted a numeric group
+ if !found {
+ gid, err := strconv.Atoi(ag)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find group %s", ag)
+ }
+ // Ensure gid is inside gid range.
+ if gid < minId || gid > maxId {
+ return nil, ErrRange
+ }
+ gidMap[gid] = struct{}{}
+ }
+ }
+ gids := []int{}
+ for gid := range gidMap {
+ gids = append(gids, gid)
+ }
+ return gids, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md
new file mode 100644
index 0000000..c60a31b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md
@@ -0,0 +1,7 @@
+context
+=======
+[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
+
+gorilla/context is a general purpose registry for global request variables.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
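+
+A minimal usage sketch (the key type and handler are illustrative):
+
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/gorilla/context"
+)
+
+type key int
+
+const myKey key = 0
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ context.Set(r, myKey, "bar") // store a request-scoped value
+ fmt.Fprintln(w, context.Get(r, myKey)) // read it back: "bar"
+}
+
+func main() {
+ // ClearHandler removes stored values at the end of each request.
+ http.Handle("/", context.ClearHandler(http.HandlerFunc(handler)))
+ http.ListenAndServe(":8000", nil)
+}
+```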
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go
new file mode 100644
index 0000000..81cb128
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "net/http"
+ "sync"
+ "time"
+)
+
+var (
+ mutex sync.RWMutex
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+ mutex.Lock()
+ if data[r] == nil {
+ data[r] = make(map[interface{}]interface{})
+ datat[r] = time.Now().Unix()
+ }
+ data[r][key] = val
+ mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+ mutex.RLock()
+ if ctx := data[r]; ctx != nil {
+ value := ctx[key]
+ mutex.RUnlock()
+ return value
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetOk returns the stored value and reports whether it was present,
+// mirroring the two-value form of a map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+ mutex.RLock()
+ if _, ok := data[r]; ok {
+ value, ok := data[r][key]
+ mutex.RUnlock()
+ return value, ok
+ }
+ mutex.RUnlock()
+ return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+ mutex.RLock()
+ if context, ok := data[r]; ok {
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+ mutex.RLock()
+ context, ok := data[r]
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+ mutex.Lock()
+ if data[r] != nil {
+ delete(data[r], key)
+ }
+ mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+ mutex.Lock()
+ clear(r)
+ mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+ delete(data, r)
+ delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used as a sanity check: if context cleaning was not set up
+// properly, some request data can be kept forever, consuming an increasing
+// amount of memory. If this is detected, Purge() must be called
+// periodically until the problem is fixed.
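+//
+// For example, to drop all request data older than one hour (a sketch):
+//
+// removed := Purge(3600)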
+func Purge(maxAge int) int {
+ mutex.Lock()
+ count := 0
+ if maxAge <= 0 {
+ count = len(data)
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+ } else {
+ min := time.Now().Unix() - int64(maxAge)
+ for r := range data {
+ if datat[r] < min {
+ clear(r)
+ count++
+ }
+ }
+ }
+ mutex.Unlock()
+ return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer Clear(r)
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go
new file mode 100644
index 0000000..73c7400
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+ http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+ package foo
+
+ import (
+ "github.com/gorilla/context"
+ )
+
+ type key int
+
+ const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+ context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+ func MyHandler(w http.ResponseWriter, r *http.Request) {
+ // val is "bar".
+ val := context.Get(r, foo.MyKey)
+
+ // returns ("bar", true)
+ val, ok := context.GetOk(r, foo.MyKey)
+ // ...
+ }
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+ type key int
+
+ const mykey key = 0
+
+ // GetMyKey returns a value for this package from the request values.
+ func GetMyKey(r *http.Request) SomeType {
+ if rv := context.Get(r, mykey); rv != nil {
+ return rv.(SomeType)
+ }
+ return nil
+ }
+
+ // SetMyKey sets a value for this package in the request values.
+ func SetMyKey(r *http.Request, val SomeType) {
+ context.Set(r, mykey, val)
+ }
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+ context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear(),
+so if you are using either of them you don't need to clear the context manually.
+*/
+package context
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE
new file mode 100644
index 0000000..0e5fb87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..9a046ff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md
@@ -0,0 +1,235 @@
+mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux)
+
+Package gorilla/mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+ * Requests can be matched based on URL host, path, path prefix, schemes,
+ header and query values, HTTP methods or using custom matchers.
+ * URL hosts and paths can have variables with an optional regular
+ expression.
+ * Registered URLs can be built, or "reversed", which helps maintain
+ references to resources.
+ * Routes can be used as subrouters: nested routes are only tested if the
+ parent route matches. This is useful to define groups of routes that
+ share common conditions like a host, a path prefix or other repeated
+ attributes. As a bonus, this optimizes request matching.
+ * It implements the http.Handler interface so it is compatible with the
+ standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+ func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+ }
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/products/{key}", ProductHandler)
+ r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+ vars := mux.Vars(request)
+ category := vars["category"]
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+ r := mux.NewRouter()
+ // Only matches if domain is "www.example.com".
+ r.Host("www.example.com")
+ // Matches a dynamic subdomain.
+ r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+ r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+ r.Methods("GET", "POST")
+
+...or URL schemes:
+
+ r.Schemes("https")
+
+...or header values:
+
+ r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+ r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+ r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+ })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+ r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is `www.example.com`. Create a route for that host and get a "subrouter"
+from it:
+
+ r := mux.NewRouter()
+ s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+ s.HandleFunc("/products/", ProductsHandler)
+ s.HandleFunc("/products/{key}", ProductHandler)
+ s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+`www.example.com`, because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as a base for their paths:
+
+ r := mux.NewRouter()
+ s := r.PathPrefix("/products").Subrouter()
+ // "/products/"
+ s.HandleFunc("/", ProductsHandler)
+ // "/products/{key}/"
+ s.HandleFunc("/{key}/", ProductHandler)
+ // "/products/{key}/details"
+ s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+ url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+ "/articles/technology/42"
+
+This also works for host variables:
+
+ r := mux.NewRouter()
+ r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // url.String() will be "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+ r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of `application/json` as well as
+`application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+ // "http://news.domain.com/"
+ host, err := r.Get("article").URLHost("subdomain", "news")
+
+ // "/articles/technology/42"
+ path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+ r := mux.NewRouter()
+ s := r.Host("{subdomain}.domain.com").Subrouter()
+ s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+## Full Example
+
+Here's a complete, runnable example of a small mux-based server:
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+ r := mux.NewRouter()
+ // Routes consist of a path and a handler function.
+ r.HandleFunc("/", YourHandler)
+
+ // Bind to a port and pass our router in
+ http.ListenAndServe(":8000", r)
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..49798cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go
@@ -0,0 +1,206 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package gorilla/mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+ * Requests can be matched based on URL host, path, path prefix, schemes,
+ header and query values, HTTP methods or using custom matchers.
+ * URL hosts and paths can have variables with an optional regular
+ expression.
+ * Registered URLs can be built, or "reversed", which helps maintain
+ references to resources.
+ * Routes can be used as subrouters: nested routes are only tested if the
+ parent route matches. This is useful to define groups of routes that
+ share common conditions like a host, a path prefix or other repeated
+ attributes. As a bonus, this optimizes request matching.
+ * It implements the http.Handler interface so it is compatible with the
+ standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+ func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+ }
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/products/{key}", ProductHandler)
+ r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+ vars := mux.Vars(request)
+ category := vars["category"]
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+ r := mux.NewRouter()
+ // Only matches if domain is "www.example.com".
+ r.Host("www.example.com")
+ // Matches a dynamic subdomain.
+ r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+ r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+ r.Methods("GET", "POST")
+
+...or URL schemes:
+
+ r.Schemes("https")
+
+...or header values:
+
+ r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+ r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+ r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+ })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+ r.HandleFunc("/products", ProductsHandler).
+ Host("www.example.com").
+ Methods("GET").
+ Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+ r := mux.NewRouter()
+ s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+ s.HandleFunc("/products/", ProductsHandler)
+ s.HandleFunc("/products/{key}", ProductHandler)
+ s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as a base for their paths:
+
+ r := mux.NewRouter()
+ s := r.PathPrefix("/products").Subrouter()
+ // "/products/"
+ s.HandleFunc("/", ProductsHandler)
+ // "/products/{key}/"
+ s.HandleFunc("/{key}/", ProductHandler)
+ // "/products/{key}/details"
+ s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+ url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+ "/articles/technology/42"
+
+This also works for host variables:
+
+ r := mux.NewRouter()
+ r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // url.String() will be "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+ r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of `application/json` as well as
+`application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+ // "http://news.domain.com/"
+ host, err := r.Get("article").URLHost("subdomain", "news")
+
+ // "/articles/technology/42"
+ path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+ r := mux.NewRouter()
+ s := r.Host("{subdomain}.domain.com").Subrouter()
+ s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+*/
+package mux
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..b32e1a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go
@@ -0,0 +1,469 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "path"
+ "regexp"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+ return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+// var router = mux.NewRouter()
+//
+// func main() {
+// http.Handle("/", router)
+// }
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+// func init() {
+// http.Handle("/", router)
+// }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+ // Configurable Handler to be used when no route matches.
+ NotFoundHandler http.Handler
+ // Parent route, if this is a subrouter.
+ parent parentRoute
+ // Routes to be matched, in order.
+ routes []*Route
+ // Routes by name for URL building.
+ namedRoutes map[string]*Route
+ // See Router.StrictSlash(). This defines the flag for new routes.
+ strictSlash bool
+ // If true, do not clear the request context after handling the request
+ KeepContext bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+ for _, route := range r.routes {
+ if route.Match(req, match) {
+ return true
+ }
+ }
+ return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ // Clean path to canonical form and redirect.
+ if p := cleanPath(req.URL.Path); p != req.URL.Path {
+
+ // Added 3 lines (Philip Schlump) - it was dropping the query string and fragment from the URL.
+ // This matches the fix in Go 1.2rc4 for the same problem. Go issue:
+ // http://code.google.com/p/go/issues/detail?id=5252
+ url := *req.URL
+ url.Path = p
+ p = url.String()
+
+ w.Header().Set("Location", p)
+ w.WriteHeader(http.StatusMovedPermanently)
+ return
+ }
+ var match RouteMatch
+ var handler http.Handler
+ if r.Match(req, &match) {
+ handler = match.Handler
+ setVars(req, match.Vars)
+ setCurrentRoute(req, match.Route)
+ }
+ if handler == nil {
+ handler = r.NotFoundHandler
+ if handler == nil {
+ handler = http.NotFoundHandler()
+ }
+ }
+ if !r.KeepContext {
+ defer context.Clear(req)
+ }
+ handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
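+//
+// For example (a sketch; handler is any func(http.ResponseWriter, *http.Request)):
+//
+// r := mux.NewRouter()
+// r.StrictSlash(true)
+// r.HandleFunc("/path/", handler)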
+func (r *Router) StrictSlash(value bool) *Router {
+ r.strictSlash = value
+ return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+ if r.namedRoutes == nil {
+ if r.parent != nil {
+ r.namedRoutes = r.parent.getNamedRoutes()
+ } else {
+ r.namedRoutes = make(map[string]*Route)
+ }
+ }
+ return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+ if r.parent != nil {
+ return r.parent.getRegexpGroup()
+ }
+ return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+ if r.parent != nil {
+ m = r.parent.buildVars(m)
+ }
+ return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+ route := &Route{parent: r, strictSlash: r.strictSlash}
+ r.routes = append(r.routes, route)
+ return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+ return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+ *http.Request)) *Route {
+ return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+ return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+ return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+ return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+ return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+ return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+ return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+ return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+ return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+ return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
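+//
+// For example, to print the name of every named route registered on a
+// router r (a minimal sketch):
+//
+// r.Walk(func(route *Route, router *Router, ancestors []*Route) error {
+// fmt.Println(route.GetName())
+// return nil
+// })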
+func (r *Router) Walk(walkFn WalkFunc) error {
+ return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that Walk is about to descend into should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+ for _, t := range r.routes {
+ if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+ continue
+ }
+
+ err := walkFn(t, r, ancestors)
+ if err == SkipRouter {
+ continue
+ }
+ for _, sr := range t.matchers {
+ if h, ok := sr.(*Router); ok {
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if h, ok := t.handler.(*Router); ok {
+ ancestors = append(ancestors, t)
+ err := h.walk(walkFn, ancestors)
+ if err != nil {
+ return err
+ }
+ ancestors = ancestors[:len(ancestors)-1]
+ }
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+ Route *Route
+ Handler http.Handler
+ Vars map[string]string
+}
+
+type contextKey int
+
+const (
+ varsKey contextKey = iota
+ routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+ if rv := context.Get(r, varsKey); rv != nil {
+ return rv.(map[string]string)
+ }
+ return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns, unless the KeepContext option is set on the
+// Router.
+func CurrentRoute(r *http.Request) *Route {
+ if rv := context.Get(r, routeKey); rv != nil {
+ return rv.(*Route)
+ }
+ return nil
+}
+
+func setVars(r *http.Request, val interface{}) {
+ context.Set(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) {
+ context.Set(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
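+// For example, cleanPath("/a/b/../c/") returns "/a/c/".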
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+ return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+ for _, v1 := range s1 {
+ for _, v2 := range s2 {
+ if v1 == v2 {
+ return fmt.Errorf("mux: duplicated route variable %q", v2)
+ }
+ }
+ }
+ return nil
+}
+
+func checkPairs(pairs ...string) (int, error) {
+ length := len(pairs)
+ if length%2 != 0 {
+ return length, fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ }
+ return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]string, length/2)
+ for i := 0; i < length; i += 2 {
+ m[pairs[i]] = pairs[i+1]
+ }
+ return m, nil
+}
+
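+// mapFromPairsToRegex converts variadic string parameters to a map of
+// compiled regexps.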
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+ length, err := checkPairs(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ m := make(map[string]*regexp.Regexp, length/2)
+ for i := 0; i < length; i += 2 {
+ regex, err := regexp.Compile(pairs[i+1])
+ if err != nil {
+ return nil, err
+ }
+ m[pairs[i]] = regex
+ }
+ return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+ for _, v := range arr {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != "" {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v == value {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// matchMapWithRegex returns true if each key exists in the given map and at
+// least one of its values matches the corresponding regexp.
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != nil {
+ // If the regexp is nil we only check that the key exists. Otherwise
+ // we also check that at least one value matches the regexp.
+ valueExists := false
+ for _, value := range values {
+ if v.MatchString(value) {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..06728dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,317 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
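+//
+// For example (a sketch), the path template "/articles/{id:[0-9]+}" yields a
+// regexp matching "/articles/42", with "id" recorded as a route variable.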
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
+ // Check if it is well-formed.
+ idxs, errBraces := braceIndices(tpl)
+ if errBraces != nil {
+ return nil, errBraces
+ }
+ // Backup the original.
+ template := tpl
+ // Now let's parse it.
+ defaultPattern := "[^/]+"
+ if matchQuery {
+ defaultPattern = "[^?&]*"
+ } else if matchHost {
+ defaultPattern = "[^.]+"
+ matchPrefix = false
+ }
+ // Strict slash only applies to plain path matching, not host, prefix, or query matching.
+ if matchPrefix || matchHost || matchQuery {
+ strictSlash = false
+ }
+ // Set a flag for strictSlash.
+ endSlash := false
+ if strictSlash && strings.HasSuffix(tpl, "/") {
+ tpl = tpl[:len(tpl)-1]
+ endSlash = true
+ }
+ varsN := make([]string, len(idxs)/2)
+ varsR := make([]*regexp.Regexp, len(idxs)/2)
+ pattern := bytes.NewBufferString("")
+ pattern.WriteByte('^')
+ reverse := bytes.NewBufferString("")
+ var end int
+ var err error
+ for i := 0; i < len(idxs); i += 2 {
+ // Set all values we are interested in.
+ raw := tpl[end:idxs[i]]
+ end = idxs[i+1]
+ parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+ name := parts[0]
+ patt := defaultPattern
+ if len(parts) == 2 {
+ patt = parts[1]
+ }
+ // Name or pattern can't be empty.
+ if name == "" || patt == "" {
+ return nil, fmt.Errorf("mux: missing name or pattern in %q",
+ tpl[idxs[i]:end])
+ }
+ // Build the regexp pattern.
+ varIdx := i / 2
+ fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt)
+ // Build the reverse template.
+ fmt.Fprintf(reverse, "%s%%s", raw)
+
+ // Append variable name and compiled pattern.
+ varsN[varIdx] = name
+ varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Add the remaining.
+ raw := tpl[end:]
+ pattern.WriteString(regexp.QuoteMeta(raw))
+ if strictSlash {
+ pattern.WriteString("[/]?")
+ }
+ if matchQuery {
+ // Add the default pattern if the query value is empty
+ if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+ pattern.WriteString(defaultPattern)
+ }
+ }
+ if !matchPrefix {
+ pattern.WriteByte('$')
+ }
+ reverse.WriteString(raw)
+ if endSlash {
+ reverse.WriteByte('/')
+ }
+ // Compile full regexp.
+ reg, errCompile := regexp.Compile(pattern.String())
+ if errCompile != nil {
+ return nil, errCompile
+ }
+ // Done!
+ return &routeRegexp{
+ template: template,
+ matchHost: matchHost,
+ matchQuery: matchQuery,
+ strictSlash: strictSlash,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
+ }, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+ // The unmodified template.
+ template string
+ // True for host match, false for path or query string match.
+ matchHost bool
+ // True for query string match, false for path and host match.
+ matchQuery bool
+ // The strictSlash value defined on the route, but disabled if PathPrefix was used.
+ strictSlash bool
+ // Expanded regexp.
+ regexp *regexp.Regexp
+ // Reverse template.
+ reverse string
+ // Variable names.
+ varsN []string
+ // Variable regexps (validators).
+ varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+ if !r.matchHost {
+ if r.matchQuery {
+ return r.matchQueryString(req)
+ }
+ return r.regexp.MatchString(req.URL.Path)
+ }
+ return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+ urlValues := make([]interface{}, len(r.varsN))
+ for k, v := range r.varsN {
+ value, ok := values[v]
+ if !ok {
+ return "", fmt.Errorf("mux: missing route variable %q", v)
+ }
+ urlValues[k] = value
+ }
+ rv := fmt.Sprintf(r.reverse, urlValues...)
+ if !r.regexp.MatchString(rv) {
+ // The URL is checked against the full regexp, instead of checking
+ // individual variables. This is faster but to provide a good error
+ // message, we check individual regexps if the URL doesn't match.
+ for k, v := range r.varsN {
+ if !r.varsR[k].MatchString(values[v]) {
+ return "", fmt.Errorf(
+ "mux: variable %q doesn't match, expected %q", values[v],
+ r.varsR[k].String())
+ }
+ }
+ }
+ return rv, nil
+}
+
+// getUrlQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getUrlQuery(req *http.Request) string {
+ if !r.matchQuery {
+ return ""
+ }
+ templateKey := strings.SplitN(r.template, "=", 2)[0]
+ for key, vals := range req.URL.Query() {
+ if key == templateKey && len(vals) > 0 {
+ return key + "=" + vals[0]
+ }
+ }
+ return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+ return r.regexp.MatchString(r.getUrlQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
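+// For example, braceIndices("{a}/{b}") returns [0 3 4 7].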
+func braceIndices(s string) ([]int, error) {
+ var level, idx int
+ idxs := make([]int, 0)
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '{':
+ if level++; level == 1 {
+ idx = i
+ }
+ case '}':
+ if level--; level == 0 {
+ idxs = append(idxs, idx, i+1)
+ } else if level < 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ }
+ }
+ if level != 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+ return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+ host *routeRegexp
+ path *routeRegexp
+ queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+ // Store host variables.
+ if v.host != nil {
+ hostVars := v.host.regexp.FindStringSubmatch(getHost(req))
+ if hostVars != nil {
+ subexpNames := v.host.regexp.SubexpNames()
+ varName := 0
+ for i, name := range subexpNames[1:] {
+ if name != "" && name == varGroupName(varName) {
+ m.Vars[v.host.varsN[varName]] = hostVars[i+1]
+ varName++
+ }
+ }
+ }
+ }
+ // Store path variables.
+ if v.path != nil {
+ pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path)
+ if pathVars != nil {
+ subexpNames := v.path.regexp.SubexpNames()
+ varName := 0
+ for i, name := range subexpNames[1:] {
+ if name != "" && name == varGroupName(varName) {
+ m.Vars[v.path.varsN[varName]] = pathVars[i+1]
+ varName++
+ }
+ }
+ // Check if we should redirect.
+ if v.path.strictSlash {
+ p1 := strings.HasSuffix(req.URL.Path, "/")
+ p2 := strings.HasSuffix(v.path.template, "/")
+ if p1 != p2 {
+ u, _ := url.Parse(req.URL.String())
+ if p1 {
+ u.Path = u.Path[:len(u.Path)-1]
+ } else {
+ u.Path += "/"
+ }
+ m.Handler = http.RedirectHandler(u.String(), 301)
+ }
+ }
+ }
+ }
+ // Store query string variables.
+ for _, q := range v.queries {
+ queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req))
+ if queryVars != nil {
+ subexpNames := q.regexp.SubexpNames()
+ varName := 0
+ for i, name := range subexpNames[1:] {
+ if name != "" && name == varGroupName(varName) {
+ m.Vars[q.varsN[varName]] = queryVars[i+1]
+ varName++
+ }
+ }
+ }
+ }
+}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+ if r.URL.IsAbs() {
+ return r.URL.Host
+ }
+ host := r.Host
+ // Slice off any port information.
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ return host
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..8901304
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go
@@ -0,0 +1,603 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+ // Parent where the route was registered (a Router).
+ parent parentRoute
+ // Request handler for the route.
+ handler http.Handler
+ // List of matchers.
+ matchers []matcher
+ // Manager for the variables from host and path.
+ regexp *routeRegexpGroup
+ // If true, when the path pattern is "/path/", accessing "/path" will
+ // redirect to the former and vice versa.
+ strictSlash bool
+ // If true, this route never matches: it is only used to build URLs.
+ buildOnly bool
+ // The name used to build URLs.
+ name string
+ // Error resulted from building a route.
+ err error
+
+ buildVarsFunc BuildVarsFunc
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+ if r.buildOnly || r.err != nil {
+ return false
+ }
+ // Match everything.
+ for _, m := range r.matchers {
+ if matched := m.Match(req, match); !matched {
+ return false
+ }
+ }
+ // Yay, we have a match. Let's collect some info about it.
+ if match.Route == nil {
+ match.Route = r
+ }
+ if match.Handler == nil {
+ match.Handler = r.handler
+ }
+ if match.Vars == nil {
+ match.Vars = make(map[string]string)
+ }
+ // Set variables.
+ if r.regexp != nil {
+ r.regexp.setMatch(req, match, r)
+ }
+ return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulting from building the route, if any.
+func (r *Route) GetError() error {
+ return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+ r.buildOnly = true
+ return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+ if r.err == nil {
+ r.handler = handler
+ }
+ return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+ return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+ return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+ if r.name != "" {
+ r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+ r.name, name)
+ }
+ if r.err == nil {
+ r.name = name
+ r.getNamedRoutes()[name] = r
+ }
+ return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+ return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+ Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+ if r.err == nil {
+ r.matchers = append(r.matchers, m)
+ }
+ return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+ if r.err != nil {
+ return r.err
+ }
+ r.regexp = r.getRegexpGroup()
+ if !matchHost && !matchQuery {
+ if len(tpl) == 0 || tpl[0] != '/' {
+ return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+ }
+ if r.regexp.path != nil {
+ tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+ }
+ }
+ rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
+ if err != nil {
+ return err
+ }
+ for _, q := range r.regexp.queries {
+ if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+ return err
+ }
+ }
+ if matchHost {
+ if r.regexp.path != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+ return err
+ }
+ }
+ r.regexp.host = rr
+ } else {
+ if r.regexp.host != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+ return err
+ }
+ }
+ if matchQuery {
+ r.regexp.queries = append(r.regexp.queries, rr)
+ } else {
+ r.regexp.path = rr
+ }
+ }
+ r.addMatcher(rr)
+ return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+// r := mux.NewRouter()
+// r.Headers("Content-Type", "application/json",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// Alternatively, you can provide a regular expression and match the header as follows:
+//
+// r.Headers("Content-Type", "application/(text|json)",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will match the same as the previous example, with the
+// addition of matching application/text as well.
+//
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]string
+ headers, r.err = mapFromPairsToString(pairs...)
+ return r.addMatcher(headerMatcher(headers))
+ }
+ return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp adds a matcher for request header values using regular
+// expressions. It accepts a sequence of key/value pairs, where each value is
+// a regular expression. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//         "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request headers match their
+// regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]*regexp.Regexp
+ headers, r.err = mapFromPairsToRegex(pairs...)
+ return r.addMatcher(headerRegexMatcher(headers))
+ }
+ return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Host("www.example.com")
+// r.Host("{subdomain}.domain.com")
+// r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, true, false, false)
+ return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+ return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+ return r.addMatcher(f)
+}
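+
+// As a usage sketch, a custom matcher that only accepts requests carrying a
+// non-empty "X-Api-Key" header (ApiHandler is a hypothetical handler):
+//
+//     r := mux.NewRouter()
+//     r.MatcherFunc(func(req *http.Request, match *mux.RouteMatch) bool {
+//         return req.Header.Get("X-Api-Key") != ""
+//     }).HandlerFunc(ApiHandler)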
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+ for k, v := range methods {
+ methods[k] = strings.ToUpper(v)
+ }
+ return r.addMatcher(methodMatcher(methods))
+}
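+
+// A minimal sketch, reusing the ProductsHandler from the examples above:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/products", ProductsHandler).Methods("GET", "POST")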
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Path("/products/").Handler(ProductsHandler)
+// r.Path("/products/{key}").Handler(ProductsHandler)
+// r.Path("/articles/{category}/{id:[0-9]+}").
+// Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, false, false, false)
+ return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, false, true, false)
+ return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+// r := mux.NewRouter()
+// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+ length := len(pairs)
+ if length%2 != 0 {
+ r.err = fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ return nil
+ }
+ for i := 0; i < length; i += 2 {
+ if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+ return r
+ }
+ }
+
+ return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+ for k, v := range schemes {
+ schemes[k] = strings.ToLower(v)
+ }
+ return r.addMatcher(schemeMatcher(schemes))
+}
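+
+// For example, a sketch restricting a route to HTTPS (SecureHandler is a
+// hypothetical handler):
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/account", SecureHandler).Schemes("https")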
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+ r.buildVarsFunc = f
+ return r
+}
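+
+// As a sketch, forcing the "category" variable to lower case before URLs are
+// built (route and handler names reuse the examples above):
+//
+//     r.HandleFunc("/articles/{category}", ArticleHandler).
+//         Name("article").
+//         BuildVarsFunc(func(vars map[string]string) map[string]string {
+//             vars["category"] = strings.ToLower(vars["category"])
+//             return vars
+//         })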
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+// r := mux.NewRouter()
+// s := r.Host("www.example.com").Subrouter()
+// s.HandleFunc("/products/", ProductsHandler)
+// s.HandleFunc("/products/{key}", ProductHandler)
+// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+ router := &Router{parent: r, strictSlash: r.strictSlash}
+ r.addMatcher(router)
+ return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// ...a URL for it can be built using:
+//
+// url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+// "/articles/technology/42"
+//
+// This also works for host variables:
+//
+// r := mux.NewRouter()
+// r.Host("{subdomain}.domain.com").
+// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// // url.String() will be "http://news.domain.com/articles/technology/42"
+// url, err := r.Get("article").URL("subdomain", "news",
+// "category", "technology",
+// "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil {
+ return nil, errors.New("mux: route doesn't have a host or path")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ var scheme, host, path string
+ if r.regexp.host != nil {
+ // Set a default scheme.
+ scheme = "http"
+ if host, err = r.regexp.host.url(values); err != nil {
+ return nil, err
+ }
+ }
+ if r.regexp.path != nil {
+ if path, err = r.regexp.path.url(values); err != nil {
+ return nil, err
+ }
+ }
+ return &url.URL{
+ Scheme: scheme,
+ Host: host,
+ Path: path,
+ }, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil || r.regexp.host == nil {
+ return nil, errors.New("mux: route doesn't have a host")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ host, err := r.regexp.host.url(values)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host,
+ }, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil || r.regexp.path == nil {
+ return nil, errors.New("mux: route doesn't have a path")
+ }
+ values, err := r.prepareVars(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ path, err := r.regexp.path.url(values)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Path: path,
+ }, nil
+}
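+
+// For example, given the "article" route named above, a sketch building only
+// the path part:
+//
+//     u, err := r.Get("article").URLPath("category", "technology", "id", "42")
+//     // u.Path == "/articles/technology/42"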
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+ m, err := mapFromPairsToString(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+ if r.parent != nil {
+ m = r.parent.buildVars(m)
+ }
+ if r.buildVarsFunc != nil {
+ m = r.buildVarsFunc(m)
+ }
+ return m
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+ getNamedRoutes() map[string]*Route
+ getRegexpGroup() *routeRegexpGroup
+ buildVars(map[string]string) map[string]string
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+ if r.parent == nil {
+ // During tests router is not always set.
+ r.parent = NewRouter()
+ }
+ return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+ if r.regexp == nil {
+ if r.parent == nil {
+ // During tests router is not always set.
+ r.parent = NewRouter()
+ }
+ regexp := r.parent.getRegexpGroup()
+ if regexp == nil {
+ r.regexp = new(routeRegexpGroup)
+ } else {
+ // Copy.
+ r.regexp = &routeRegexpGroup{
+ host: regexp.host,
+ path: regexp.path,
+ queries: regexp.queries,
+ }
+ }
+ }
+ return r.regexp
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
new file mode 100644
index 0000000..edbe200
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
@@ -0,0 +1,2 @@
+Tianon Gravi (@tianon)
+Aleksa Sarai (@cyphar)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go
new file mode 100644
index 0000000..6f8a982
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go
@@ -0,0 +1,108 @@
+package user
+
+import (
+ "errors"
+ "fmt"
+ "syscall"
+)
+
+var (
+ // The current operating system does not provide the required data for user lookups.
+ ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
+)
+
+func lookupUser(filter func(u User) bool) (User, error) {
+ // Get operating system-specific passwd reader-closer.
+ passwd, err := GetPasswd()
+ if err != nil {
+ return User{}, err
+ }
+ defer passwd.Close()
+
+ // Get the users.
+ users, err := ParsePasswdFilter(passwd, filter)
+ if err != nil {
+ return User{}, err
+ }
+
+ // No user entries found.
+ if len(users) == 0 {
+ return User{}, fmt.Errorf("no matching entries in passwd file")
+ }
+
+ // Assume the first entry is the "correct" one.
+ return users[0], nil
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+ return LookupUid(syscall.Getuid())
+}
+
+// LookupUser looks up a user by their username in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUser returns an error.
+func LookupUser(username string) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Name == username
+ })
+}
+
+// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
+// be found (or there is no /etc/passwd file on the filesystem), then LookupUid
+// returns an error.
+func LookupUid(uid int) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Uid == uid
+ })
+}
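+
+// As a usage sketch, resolving the root user (assuming a standard /etc/passwd
+// is present):
+//
+//     u, err := user.LookupUser("root")
+//     if err == nil {
+//         fmt.Printf("uid=%d home=%s\n", u.Uid, u.Home)
+//     }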
+
+func lookupGroup(filter func(g Group) bool) (Group, error) {
+ // Get operating system-specific group reader-closer.
+ group, err := GetGroup()
+ if err != nil {
+ return Group{}, err
+ }
+ defer group.Close()
+
+	// Get the groups.
+ groups, err := ParseGroupFilter(group, filter)
+ if err != nil {
+ return Group{}, err
+ }
+
+	// No group entries found.
+ if len(groups) == 0 {
+ return Group{}, fmt.Errorf("no matching entries in group file")
+ }
+
+ // Assume the first entry is the "correct" one.
+ return groups[0], nil
+}
+
+// CurrentGroup looks up the current user's primary group by its group id's
+// entry in /etc/group. If the group cannot be found (or there is no
+// /etc/group file on the filesystem), then CurrentGroup returns an error.
+func CurrentGroup() (Group, error) {
+ return LookupGid(syscall.Getgid())
+}
+
+// LookupGroup looks up a group by its name in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGroup
+// returns an error.
+func LookupGroup(groupname string) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Name == groupname
+ })
+}
+
+// LookupGid looks up a group by its group id in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGid
+// returns an error.
+func LookupGid(gid int) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Gid == gid
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
new file mode 100644
index 0000000..758b734
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -0,0 +1,30 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package user
+
+import (
+ "io"
+ "os"
+)
+
+// Unix-specific path to the passwd and group formatted files.
+const (
+ unixPasswdPath = "/etc/passwd"
+ unixGroupPath = "/etc/group"
+)
+
+func GetPasswdPath() (string, error) {
+ return unixPasswdPath, nil
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return os.Open(unixPasswdPath)
+}
+
+func GetGroupPath() (string, error) {
+ return unixGroupPath, nil
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return os.Open(unixGroupPath)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
new file mode 100644
index 0000000..7217948
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
@@ -0,0 +1,21 @@
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package user
+
+import "io"
+
+func GetPasswdPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
+
+func GetGroupPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go
new file mode 100644
index 0000000..964e31b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go
@@ -0,0 +1,413 @@
+package user
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ minId = 0
+ maxId = 1<<31 - 1 //for 32-bit systems compatibility
+)
+
+var (
+ ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId)
+)
+
+type User struct {
+ Name string
+ Pass string
+ Uid int
+ Gid int
+ Gecos string
+ Home string
+ Shell string
+}
+
+type Group struct {
+ Name string
+ Pass string
+ Gid int
+ List []string
+}
+
+func parseLine(line string, v ...interface{}) {
+ if line == "" {
+ return
+ }
+
+ parts := strings.Split(line, ":")
+ for i, p := range parts {
+ if len(v) <= i {
+ // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files
+ break
+ }
+
+ switch e := v[i].(type) {
+ case *string:
+ // "root", "adm", "/bin/bash"
+ *e = p
+ case *int:
+ // "0", "4", "1000"
+ // ignore string to int conversion errors, for great "tolerance" of naughty configuration files
+ *e, _ = strconv.Atoi(p)
+ case *[]string:
+ // "", "root", "root,adm,daemon"
+ if p != "" {
+ *e = strings.Split(p, ",")
+ } else {
+ *e = []string{}
+ }
+ default:
+ // panic, because this is a programming/logic error, not a runtime one
+ panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!")
+ }
+ }
+}
+
+func ParsePasswdFile(path string) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswd(passwd)
+}
+
+func ParsePasswd(passwd io.Reader) ([]User, error) {
+ return ParsePasswdFilter(passwd, nil)
+}
+
+func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswdFilter(passwd, filter)
+}
+
+func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for passwd-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []User{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 passwd
+ // name:password:UID:GID:GECOS:directory:shell
+ // Name:Pass:Uid:Gid:Gecos:Home:Shell
+ // root:x:0:0:root:/root:/bin/bash
+ // adm:x:3:4:adm:/var/adm:/bin/false
+ p := User{}
+ parseLine(
+ text,
+ &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell,
+ )
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
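+
+// For example, a sketch filtering an in-memory passwd-formatted buffer for
+// uid 0 (using strings.NewReader from the standard library):
+//
+//     r := strings.NewReader("root:x:0:0:root:/root:/bin/bash\n")
+//     users, err := user.ParsePasswdFilter(r, func(u user.User) bool {
+//         return u.Uid == 0
+//     })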
+
+func ParseGroupFile(path string) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroup(group)
+}
+
+func ParseGroup(group io.Reader) ([]Group, error) {
+ return ParseGroupFilter(group, nil)
+}
+
+func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroupFilter(group, filter)
+}
+
+func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for group-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []Group{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 group
+ // group_name:password:GID:user_list
+ // Name:Pass:Gid:List
+ // root:x:0:root
+ // adm:x:4:root,adm,daemon
+ p := Group{}
+ parseLine(
+ text,
+ &p.Name, &p.Pass, &p.Gid, &p.List,
+ )
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
+
+type ExecUser struct {
+ Uid, Gid int
+ Sgids []int
+ Home string
+}
+
+// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
+// given file paths and uses that data as the arguments to GetExecUser. If the
+// files cannot be opened for any reason, the error is ignored and a nil
+// io.Reader is passed instead.
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
+ passwd, err := os.Open(passwdPath)
+ if err != nil {
+ passwd = nil
+ } else {
+ defer passwd.Close()
+ }
+
+ group, err := os.Open(groupPath)
+ if err != nil {
+ group = nil
+ } else {
+ defer group.Close()
+ }
+
+ return GetExecUser(userSpec, defaults, passwd, group)
+}
+
+// GetExecUser parses a user specification string (using the passwd and group
+// readers as sources for /etc/passwd and /etc/group data, respectively). In
+// the case of blank fields or missing data from the sources, the values in
+// defaults are used.
+//
+// GetExecUser will return an error if a user or group literal could not be
+// found in any entry in passwd and group respectively.
+//
+// Examples of valid user specifications are:
+// * ""
+// * "user"
+// * "uid"
+// * "user:group"
+// * "uid:gid
+// * "user:gid"
+// * "uid:group"
+func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
+ var (
+ userArg, groupArg string
+ name string
+ )
+
+ if defaults == nil {
+ defaults = new(ExecUser)
+ }
+
+ // Copy over defaults.
+ user := &ExecUser{
+ Uid: defaults.Uid,
+ Gid: defaults.Gid,
+ Sgids: defaults.Sgids,
+ Home: defaults.Home,
+ }
+
+ // Sgids slice *cannot* be nil.
+ if user.Sgids == nil {
+ user.Sgids = []int{}
+ }
+
+ // allow for userArg to have either "user" syntax, or optionally "user:group" syntax
+ parseLine(userSpec, &userArg, &groupArg)
+
+ users, err := ParsePasswdFilter(passwd, func(u User) bool {
+ if userArg == "" {
+ return u.Uid == user.Uid
+ }
+ return u.Name == userArg || strconv.Itoa(u.Uid) == userArg
+ })
+ if err != nil && passwd != nil {
+ if userArg == "" {
+ userArg = strconv.Itoa(user.Uid)
+ }
+ return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err)
+ }
+
+	haveUser := len(users) > 0
+ if haveUser {
+ // if we found any user entries that matched our filter, let's take the first one as "correct"
+ name = users[0].Name
+ user.Uid = users[0].Uid
+ user.Gid = users[0].Gid
+ user.Home = users[0].Home
+ } else if userArg != "" {
+ // we asked for a user but didn't find them... let's check to see if we wanted a numeric user
+ user.Uid, err = strconv.Atoi(userArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return nil, fmt.Errorf("Unable to find user %v", userArg)
+ }
+
+ // Must be inside valid uid range.
+ if user.Uid < minId || user.Uid > maxId {
+ return nil, ErrRange
+ }
+
+ // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
+ }
+
+ if groupArg != "" || name != "" {
+ groups, err := ParseGroupFilter(group, func(g Group) bool {
+ // Explicit group format takes precedence.
+ if groupArg != "" {
+ return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg
+ }
+
+ // Check if user is a member.
+ for _, u := range g.List {
+ if u == name {
+ return true
+ }
+ }
+
+ return false
+ })
+ if err != nil && group != nil {
+ return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
+ }
+
+		haveGroup := len(groups) > 0
+ if groupArg != "" {
+ if haveGroup {
+ // if we found any group entries that matched our filter, let's take the first one as "correct"
+ user.Gid = groups[0].Gid
+ } else {
+				// we asked for a group but didn't find it... let's check to see if we wanted a numeric group
+ user.Gid, err = strconv.Atoi(groupArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return nil, fmt.Errorf("Unable to find group %v", groupArg)
+ }
+
+ // Ensure gid is inside gid range.
+ if user.Gid < minId || user.Gid > maxId {
+ return nil, ErrRange
+ }
+
+ // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
+ }
+ } else if haveGroup {
+ // If implicit group format, fill supplementary gids.
+ user.Sgids = make([]int, len(groups))
+ for i, group := range groups {
+ user.Sgids[i] = group.Gid
+ }
+ }
+ }
+
+ return user, nil
+}
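+
+// As a sketch, resolving a "user:group" specification against in-memory data
+// (the entries below are made up for illustration):
+//
+//     passwd := strings.NewReader("www:x:33:33::/var/www:/bin/false\n")
+//     group := strings.NewReader("www:x:33:\n")
+//     execUser, err := user.GetExecUser("www:www", nil, passwd, group)
+//     // on success, execUser.Uid == 33 and execUser.Gid == 33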
+
+// GetAdditionalGroups looks up a list of groups by name or group id against
+// the given /etc/group formatted data. If a group name cannot be found,
+// an error will be returned. If a group id cannot be found, it will be returned
+// as-is.
+func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
+ groups, err := ParseGroupFilter(group, func(g Group) bool {
+ for _, ag := range additionalGroups {
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+ }
+
+ gidMap := make(map[int]struct{})
+ for _, ag := range additionalGroups {
+ var found bool
+ for _, g := range groups {
+ // if we found a matched group either by name or gid, take the
+ // first matched as correct
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ if _, ok := gidMap[g.Gid]; !ok {
+ gidMap[g.Gid] = struct{}{}
+ found = true
+ break
+ }
+ }
+ }
+ // we asked for a group but didn't find it. let's check to see
+ // if we wanted a numeric group
+ if !found {
+ gid, err := strconv.Atoi(ag)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find group %s", ag)
+ }
+ // Ensure gid is inside gid range.
+ if gid < minId || gid > maxId {
+ return nil, ErrRange
+ }
+ gidMap[gid] = struct{}{}
+ }
+ }
+ gids := []int{}
+ for gid := range gidMap {
+ gids = append(gids, gid)
+ }
+ return gids, nil
+}
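+
+// For example, a sketch resolving one named and one numeric group against
+// in-memory data:
+//
+//     group := strings.NewReader("adm:x:4:\n")
+//     gids, err := user.GetAdditionalGroups([]string{"adm", "1000"}, group)
+//     // on success, gids contains 4 and 1000 (order not guaranteed)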
+
+// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups that opens
+// the file at groupPath and passes it to GetAdditionalGroups.
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
+ group, err := os.Open(groupPath)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to open group file: %v", err)
+ }
+ defer group.Close()
+ return GetAdditionalGroups(additionalGroups, group)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go
new file mode 100644
index 0000000..9d21da2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go
@@ -0,0 +1,595 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+)
+
+// APIImages represents an image returned in the ListImages call.
+type APIImages struct {
+ ID string `json:"Id" yaml:"Id"`
+ RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+ ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty"`
+ RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Image is the type representing a docker image and its various properties
+type Image struct {
+ ID string `json:"Id" yaml:"Id"`
+ Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty"`
+ Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty"`
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
+ ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"`
+ DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"`
+ Author string `json:"Author,omitempty" yaml:"Author,omitempty"`
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
+ Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+}
+
+// ImagePre012 serves the same purpose as the Image type except that it is for
+// earlier versions of the Docker API (pre-012 to be specific)
+type ImagePre012 struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ Container string `json:"container,omitempty"`
+ ContainerConfig Config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ Config *Config `json:"config,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ Size int64 `json:"size,omitempty"`
+}
+
+var (
+ // ErrNoSuchImage is the error returned when the image does not exist.
+ ErrNoSuchImage = errors.New("no such image")
+
+ // ErrMissingRepo is the error returned when the remote repository is
+ // missing.
+ ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'")
+
+ // ErrMissingOutputStream is the error returned when no output stream
+ // is provided to some calls, like BuildImage.
+ ErrMissingOutputStream = errors.New("missing output stream")
+
+ // ErrMultipleContexts is the error returned when both a ContextDir and
+ // InputStream are provided in BuildImageOptions
+ ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream")
+
+	// ErrMustSpecifyNames is the error returned when the Names field on
+ // ExportImagesOptions is nil or empty
+ ErrMustSpecifyNames = errors.New("must specify at least one name to export")
+)
+
+// ListImagesOptions specifies parameters for the ListImages function.
+//
+// See https://goo.gl/xBe1u3 for more details.
+type ListImagesOptions struct {
+ All bool
+ Filters map[string][]string
+ Digests bool
+}
+
+// ListImages returns the list of available images in the server.
+//
+// See https://goo.gl/xBe1u3 for more details.
+func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
+ path := "/images/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var images []APIImages
+ if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
+ return nil, err
+ }
+ return images, nil
+}
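+
+// A usage sketch, assuming the package's NewClient constructor and a local
+// Docker socket (the endpoint is an assumption):
+//
+//     client, _ := docker.NewClient("unix:///var/run/docker.sock")
+//     images, err := client.ListImages(docker.ListImagesOptions{All: false})
+//     if err == nil {
+//         for _, img := range images {
+//             fmt.Println(img.ID, img.RepoTags)
+//         }
+//     }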
+
+// ImageHistory represents a layer in an image's history returned by the
+// ImageHistory call.
+type ImageHistory struct {
+ ID string `json:"Id" yaml:"Id"`
+ Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+}
+
+// ImageHistory returns the history of the image by its name or ID.
+//
+// See https://goo.gl/8bnTId for more details.
+func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
+ resp, err := c.do("GET", "/images/"+name+"/history", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var history []ImageHistory
+ if err := json.NewDecoder(resp.Body).Decode(&history); err != nil {
+ return nil, err
+ }
+ return history, nil
+}
+
+// RemoveImage removes an image by its name or ID.
+//
+// See https://goo.gl/V3ZWnK for more details.
+func (c *Client) RemoveImage(name string) error {
+ resp, err := c.do("DELETE", "/images/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// RemoveImageOptions represents the set of options available for removing an image
+// from a registry.
+//
+// See https://goo.gl/V3ZWnK for more details.
+type RemoveImageOptions struct {
+ Force bool `qs:"force"`
+ NoPrune bool `qs:"noprune"`
+}
+
+// RemoveImageExtended removes an image by its name or ID.
+// Extra params can be passed, see RemoveImageOptions
+//
+// See https://goo.gl/V3ZWnK for more details.
+func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error {
+ uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts))
+ resp, err := c.do("DELETE", uri, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectImage returns an image by its name or ID.
+//
+// See https://goo.gl/jHPcg6 for more details.
+func (c *Client) InspectImage(name string) (*Image, error) {
+ resp, err := c.do("GET", "/images/"+name+"/json", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var image Image
+
+ // if the caller elected to skip checking the server's version, assume it's the latest
+ if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) {
+ if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+ return nil, err
+ }
+ } else {
+ var imagePre012 ImagePre012
+ if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil {
+ return nil, err
+ }
+
+ image.ID = imagePre012.ID
+ image.Parent = imagePre012.Parent
+ image.Comment = imagePre012.Comment
+ image.Created = imagePre012.Created
+ image.Container = imagePre012.Container
+ image.ContainerConfig = imagePre012.ContainerConfig
+ image.DockerVersion = imagePre012.DockerVersion
+ image.Author = imagePre012.Author
+ image.Config = imagePre012.Config
+ image.Architecture = imagePre012.Architecture
+ image.Size = imagePre012.Size
+ }
+
+ return &image, nil
+}
+
+// PushImageOptions represents options to use in the PushImage method.
+//
+// See https://goo.gl/zPtZaT for more details.
+type PushImageOptions struct {
+ // Name of the image
+ Name string
+
+ // Tag of the image
+ Tag string
+
+ // Registry server to push the image
+ Registry string
+
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+}
+
+// PushImage pushes an image to a remote registry, logging progress to opts.OutputStream.
+//
+// An empty instance of AuthConfiguration may be used for unauthenticated
+// pushes.
+//
+// See https://goo.gl/zPtZaT for more details.
+func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error {
+ if opts.Name == "" {
+ return ErrNoSuchImage
+ }
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ name := opts.Name
+ opts.Name = ""
+ path := "/images/" + name + "/push?" + queryString(&opts)
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ stdout: opts.OutputStream,
+ })
+}
+
+// PullImageOptions represents the set of options available for pulling an image
+// from a registry.
+//
+// See https://goo.gl/iJkZjD for more details.
+type PullImageOptions struct {
+ Repository string `qs:"fromImage"`
+ Registry string
+ Tag string
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+}
+
+// PullImage pulls an image from a remote registry, logging progress to
+// opts.OutputStream.
+//
+// See https://goo.gl/iJkZjD for more details.
+func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream)
+}
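+
+// For example, a sketch pulling a public image without authentication
+// (client construction as in the ListImages example above):
+//
+//     err := client.PullImage(docker.PullImageOptions{
+//         Repository:   "busybox",
+//         Tag:          "latest",
+//         OutputStream: os.Stdout,
+//     }, docker.AuthConfiguration{})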
+
+func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error {
+ path := "/images/create?" + qs
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: rawJSONStream,
+ headers: headers,
+ in: in,
+ stdout: w,
+ })
+}
+
+// LoadImageOptions represents the options for LoadImage Docker API Call
+//
+// See https://goo.gl/JyClMX for more details.
+type LoadImageOptions struct {
+ InputStream io.Reader
+}
+
+// LoadImage imports a Docker image from a tarball.
+//
+// See https://goo.gl/JyClMX for more details.
+func (c *Client) LoadImage(opts LoadImageOptions) error {
+ return c.stream("POST", "/images/load", streamOptions{
+ setRawTerminal: true,
+ in: opts.InputStream,
+ })
+}
+
+// ExportImageOptions represents the options for the ExportImage Docker API call.
+//
+// See https://goo.gl/le7vK8 for more details.
+type ExportImageOptions struct {
+ Name string
+ OutputStream io.Writer
+}
+
+// ExportImage exports an image (as a tar file) into the stream.
+//
+// See https://goo.gl/le7vK8 for more details.
+func (c *Client) ExportImage(opts ExportImageOptions) error {
+ return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ })
+}
+
+// ExportImagesOptions represents the options for the ExportImages Docker API call.
+//
+// See https://goo.gl/huC7HA for more details.
+type ExportImagesOptions struct {
+ Names []string
+ OutputStream io.Writer `qs:"-"`
+}
+
+// ExportImages exports one or more images (as a tar file) into the stream.
+//
+// See https://goo.gl/huC7HA for more details.
+func (c *Client) ExportImages(opts ExportImagesOptions) error {
+	if len(opts.Names) == 0 {
+ return ErrMustSpecifyNames
+ }
+ return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ })
+}
+
+// ImportImageOptions represents the set of options available for importing
+// an image from a source file or stdin.
+//
+// See https://goo.gl/iJkZjD for more details.
+type ImportImageOptions struct {
+ Repository string `qs:"repo"`
+ Source string `qs:"fromSrc"`
+ Tag string `qs:"tag"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+}
+
+// ImportImage imports an image from a URL, a file, or stdin.
+//
+// See https://goo.gl/iJkZjD for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+ if opts.Source != "-" {
+ opts.InputStream = nil
+ }
+ if opts.Source != "-" && !isURL(opts.Source) {
+ f, err := os.Open(opts.Source)
+ if err != nil {
+ return err
+ }
+ opts.InputStream = f
+ opts.Source = "-"
+ }
+ return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream)
+}
+
+// BuildImageOptions represents the set of options available for building an
+// image from a tarball containing a Dockerfile.
+//
+// For more details about the Docker building process, see
+// http://goo.gl/tlPXPu.
+type BuildImageOptions struct {
+ Name string `qs:"t"`
+ Dockerfile string `qs:"dockerfile"`
+ NoCache bool `qs:"nocache"`
+ SuppressOutput bool `qs:"q"`
+ Pull bool `qs:"pull"`
+ RmTmpContainer bool `qs:"rm"`
+ ForceRmTmpContainer bool `qs:"forcerm"`
+ Memory int64 `qs:"memory"`
+ Memswap int64 `qs:"memswap"`
+ CPUShares int64 `qs:"cpushares"`
+ CPUSetCPUs string `qs:"cpusetcpus"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ Remote string `qs:"remote"`
+ Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header
+ AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
+ ContextDir string `qs:"-"`
+ Ulimits []ULimit `qs:"-"`
+}
+
+// BuildImage builds an image from a tarball's URL or a Dockerfile in the input
+// stream.
+//
+// See https://goo.gl/xySxCe for more details.
+func (c *Client) BuildImage(opts BuildImageOptions) error {
+ if opts.OutputStream == nil {
+ return ErrMissingOutputStream
+ }
+ headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs))
+ if err != nil {
+ return err
+ }
+
+ if opts.Remote != "" && opts.Name == "" {
+ opts.Name = opts.Remote
+ }
+ if opts.InputStream != nil || opts.ContextDir != "" {
+ headers["Content-Type"] = "application/tar"
+ } else if opts.Remote == "" {
+ return ErrMissingRepo
+ }
+ if opts.ContextDir != "" {
+ if opts.InputStream != nil {
+ return ErrMultipleContexts
+ }
+ var err error
+ if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil {
+ return err
+ }
+ }
+
+ qs := queryString(&opts)
+ if len(opts.Ulimits) > 0 {
+ if b, err := json.Marshal(opts.Ulimits); err == nil {
+ item := url.Values(map[string][]string{})
+ item.Add("ulimits", string(b))
+ qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ }
+ }
+
+ return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ })
+}
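+
+// As a sketch, building an image from a local context directory (the paths
+// and image name are hypothetical; client construction as above):
+//
+//     var buf bytes.Buffer
+//     err := client.BuildImage(docker.BuildImageOptions{
+//         Name:         "myorg/myimage:latest",
+//         ContextDir:   "/path/to/context",
+//         Dockerfile:   "Dockerfile",
+//         OutputStream: &buf,
+//     })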
+
+func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} {
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) {
+ return AuthConfigurations119(authConfigs.Configs)
+ }
+ return authConfigs
+}
+
+// TagImageOptions present the set of options to tag an image.
+//
+// See https://goo.gl/98ZzkU for more details.
+type TagImageOptions struct {
+ Repo string
+ Tag string
+ Force bool
+}
+
+// TagImage adds a tag to the image identified by the given name.
+//
+// See https://goo.gl/98ZzkU for more details.
+func (c *Client) TagImage(name string, opts TagImageOptions) error {
+ if name == "" {
+ return ErrNoSuchImage
+ }
+ resp, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s",
+ queryString(&opts)), doOptions{})
+
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+
+ return err
+}
+
+func isURL(u string) bool {
+ p, err := url.Parse(u)
+ if err != nil {
+ return false
+ }
+ return p.Scheme == "http" || p.Scheme == "https"
+}
+
+func headersWithAuth(auths ...interface{}) (map[string]string, error) {
+ var headers = make(map[string]string)
+
+ for _, auth := range auths {
+ switch auth.(type) {
+ case AuthConfiguration:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ case AuthConfigurations, AuthConfigurations119:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ }
+ }
+
+ return headers, nil
+}
+
+// APIImageSearch reflects the result of a search on the Docker Hub.
+//
+// See https://goo.gl/AYjyrF for more details.
+type APIImageSearch struct {
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"`
+ IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty"`
+}
+
+// SearchImages searches the Docker Hub for the given term.
+//
+// See https://goo.gl/AYjyrF for more details.
+func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
+ resp, err := c.do("GET", "/images/search?term="+term, doOptions{})
+	if err != nil {
+		return nil, err
+	}
+	// Close the body only after the error check: resp may be nil on failure.
+	defer resp.Body.Close()
+ var searchResult []APIImageSearch
+ if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+ return nil, err
+ }
+ return searchResult, nil
+}
+
+// SearchImagesEx searches the Docker Hub for the given term, using authentication.
+//
+// See https://goo.gl/AYjyrF for more details.
+func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) {
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.do("GET", "/images/search?term="+term, doOptions{
+ headers: headers,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ defer resp.Body.Close()
+
+ var searchResult []APIImageSearch
+ if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+ return nil, err
+ }
+
+ return searchResult, nil
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go
new file mode 100644
index 0000000..34c9653
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go
@@ -0,0 +1,57 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import "strings"
+
+// Version returns version information about the docker server.
+//
+// See https://goo.gl/ND9R8L for more details.
+func (c *Client) Version() (*Env, error) {
+ resp, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var env Env
+ if err := env.Decode(resp.Body); err != nil {
+ return nil, err
+ }
+ return &env, nil
+}
+
+// Info returns system-wide information about the Docker server.
+//
+// See https://goo.gl/ElTHi2 for more details.
+func (c *Client) Info() (*Env, error) {
+ resp, err := c.do("GET", "/info", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var info Env
+ if err := info.Decode(resp.Body); err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
+// ParseRepositoryTag gets the name of the repository and returns it split
+// into two parts: the repository and the tag.
+//
+// Some examples:
+//
+// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
+// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
+func ParseRepositoryTag(repoTag string) (repository string, tag string) {
+ n := strings.LastIndex(repoTag, ":")
+ if n < 0 {
+ return repoTag, ""
+ }
+ if tag := repoTag[n+1:]; !strings.Contains(tag, "/") {
+ return repoTag[:n], tag
+ }
+ return repoTag, ""
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go
new file mode 100644
index 0000000..8fa7091
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go
@@ -0,0 +1,143 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+)
+
+// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the
+// network already exists.
+var ErrNetworkAlreadyExists = errors.New("network already exists")
+
+// Network represents a network.
+//
+// See https://goo.gl/FDkCdQ for more details.
+type Network struct {
+ Name string `json:"name"`
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Endpoints []*Endpoint `json:"endpoints"`
+}
+
+// Endpoint represents an endpoint.
+//
+// See https://goo.gl/FDkCdQ for more details.
+type Endpoint struct {
+ Name string `json:"name"`
+ ID string `json:"id"`
+ Network string `json:"network"`
+}
+
+// ListNetworks returns all networks.
+//
+// See https://goo.gl/4hCNtZ for more details.
+func (c *Client) ListNetworks() ([]Network, error) {
+ resp, err := c.do("GET", "/networks", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var networks []Network
+ if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil {
+ return nil, err
+ }
+ return networks, nil
+}
+
+// NetworkInfo returns information about a network by its ID.
+//
+// See https://goo.gl/4hCNtZ for more details.
+func (c *Client) NetworkInfo(id string) (*Network, error) {
+ path := "/networks/" + id
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchNetwork{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var network Network
+ if err := json.NewDecoder(resp.Body).Decode(&network); err != nil {
+ return nil, err
+ }
+ return &network, nil
+}
+
+// CreateNetworkOptions specify parameters to the CreateNetwork function and
+// (for now) is the expected body of the "create network" http request message
+//
+// See https://goo.gl/FDkCdQ for more details.
+type CreateNetworkOptions struct {
+ Name string `json:"name"`
+ NetworkType string `json:"network_type"`
+ Options map[string]interface{} `json:"options"`
+}
+
+// CreateNetwork creates a new network, returning the network instance,
+// or an error in case of failure.
+//
+// See https://goo.gl/FDkCdQ for more details.
+func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
+ resp, err := c.do(
+ "POST",
+ "/networks",
+ doOptions{
+ data: opts,
+ },
+ )
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusConflict {
+ return nil, ErrNetworkAlreadyExists
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ type createNetworkResponse struct {
+ ID string
+ }
+ var (
+ network Network
+ cnr createNetworkResponse
+ )
+ if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil {
+ return nil, err
+ }
+
+ network.Name = opts.Name
+ network.ID = cnr.ID
+ network.Type = opts.NetworkType
+
+ return &network, nil
+}
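+
+// For example, a sketch creating a network (the network type string is an
+// assumption and depends on the server's available drivers):
+//
+//     net, err := client.CreateNetwork(docker.CreateNetworkOptions{
+//         Name:        "backend",
+//         NetworkType: "bridge",
+//     })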
+
+// RemoveNetwork removes a network, returning an error in case of failure.
+//
+// See https://goo.gl/FDkCdQ for more details.
+func (c *Client) RemoveNetwork(id string) error {
+ resp, err := c.do("DELETE", "/networks/"+id, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNetwork{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// NoSuchNetwork is the error returned when a given network does not exist.
+type NoSuchNetwork struct {
+ ID string
+}
+
+func (err *NoSuchNetwork) Error() string {
+ return fmt.Sprintf("No such network: %s", err.ID)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go
new file mode 100644
index 0000000..16aa003
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go
@@ -0,0 +1,49 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+// Signal represents a signal that can be sent to the container in a
+// KillContainer call.
+type Signal int
+
+// These values represent all signals available on Linux, where containers will
+// be running.
+const (
+ SIGABRT = Signal(0x6)
+ SIGALRM = Signal(0xe)
+ SIGBUS = Signal(0x7)
+ SIGCHLD = Signal(0x11)
+ SIGCLD = Signal(0x11)
+ SIGCONT = Signal(0x12)
+ SIGFPE = Signal(0x8)
+ SIGHUP = Signal(0x1)
+ SIGILL = Signal(0x4)
+ SIGINT = Signal(0x2)
+ SIGIO = Signal(0x1d)
+ SIGIOT = Signal(0x6)
+ SIGKILL = Signal(0x9)
+ SIGPIPE = Signal(0xd)
+ SIGPOLL = Signal(0x1d)
+ SIGPROF = Signal(0x1b)
+ SIGPWR = Signal(0x1e)
+ SIGQUIT = Signal(0x3)
+ SIGSEGV = Signal(0xb)
+ SIGSTKFLT = Signal(0x10)
+ SIGSTOP = Signal(0x13)
+ SIGSYS = Signal(0x1f)
+ SIGTERM = Signal(0xf)
+ SIGTRAP = Signal(0x5)
+ SIGTSTP = Signal(0x14)
+ SIGTTIN = Signal(0x15)
+ SIGTTOU = Signal(0x16)
+ SIGUNUSED = Signal(0x1f)
+ SIGURG = Signal(0x17)
+ SIGUSR1 = Signal(0xa)
+ SIGUSR2 = Signal(0xc)
+ SIGVTALRM = Signal(0x1a)
+ SIGWINCH = Signal(0x1c)
+ SIGXCPU = Signal(0x18)
+ SIGXFSZ = Signal(0x19)
+)
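+
+// A minimal usage sketch (illustrative only; KillContainerOptions is defined
+// elsewhere in this package, and containerID is assumed):
+//
+// err := client.KillContainer(docker.KillContainerOptions{
+// ID: containerID,
+// Signal: docker.SIGTERM,
+// })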
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
new file mode 100644
index 0000000..48042cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
@@ -0,0 +1,117 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
+)
+
+func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
+ excludes, err := parseDockerignore(srcPath)
+ if err != nil {
+ return nil, err
+ }
+
+ includes := []string{"."}
+
+ // If .dockerignore mentions .dockerignore or the Dockerfile
+ // then make sure we send both files over to the daemon
+ // because Dockerfile is, obviously, needed no matter what, and
+ // .dockerignore is needed to know if either one needs to be
+ // removed. The daemon will remove them for us, if needed, after it
+ // parses the Dockerfile.
+ //
+ // https://github.com/docker/docker/issues/8330
+ //
+ forceIncludeFiles := []string{".dockerignore", dockerfilePath}
+
+ for _, includeFile := range forceIncludeFiles {
+ if includeFile == "" {
+ continue
+ }
+ keepThem, err := fileutils.Matches(includeFile, excludes)
+ if err != nil {
+ return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
+ }
+ if keepThem {
+ includes = append(includes, includeFile)
+ }
+ }
+
+ if err := validateContextDirectory(srcPath, excludes); err != nil {
+ return nil, err
+ }
+ tarOpts := &archive.TarOptions{
+ ExcludePatterns: excludes,
+ IncludeFiles: includes,
+ Compression: archive.Uncompressed,
+ NoLchown: true,
+ }
+ return archive.TarWithOptions(srcPath, tarOpts)
+}
+
+// validateContextDirectory checks if all the contents of the directory
+// can be read and returns an error if some files can't be read.
+// Symlinks which point to non-existing files don't trigger an error
+func validateContextDirectory(srcPath string, excludes []string) error {
+ return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+ // skip this directory/file if it's not in the path, it won't get added to the context
+ if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
+ return err
+ } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
+ return err
+ } else if skip {
+ if f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if err != nil {
+ if os.IsPermission(err) {
+ return fmt.Errorf("can't stat '%s'", filePath)
+ }
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ // skip checking if symlinks point to non-existing files, such symlinks can be useful
+ // also skip named pipes, because they hang on open
+ if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+ return nil
+ }
+
+ if !f.IsDir() {
+ currentFile, err := os.Open(filePath)
+ if err != nil && os.IsPermission(err) {
+ return fmt.Errorf("no permission to read from '%s'", filePath)
+ }
+ currentFile.Close()
+ }
+ return nil
+ })
+}
+
+func parseDockerignore(root string) ([]string, error) {
+ var excludes []string
+ ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
+ if err != nil && !os.IsNotExist(err) {
+ return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
+ }
+ excludes = strings.Split(string(ignore), "\n")
+
+ return excludes, nil
+}
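+
+// A rough sketch of how the build path is expected to use these helpers
+// (illustrative only; both functions are unexported, so this applies only
+// within this package, and the context path is an assumption):
+//
+// stream, err := createTarStream("/path/to/context", "Dockerfile")
+// if err != nil {
+// // handle error: unreadable context or bad .dockerignore
+// }
+// defer stream.Close()
+// // stream now carries the build context, honoring .dockerignore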
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore
new file mode 100644
index 0000000..027e8c2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore
@@ -0,0 +1,3 @@
+container.tar
+dockerfile.tar
+foofile
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
new file mode 100644
index 0000000..0948dcf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
@@ -0,0 +1,15 @@
+# this file describes how to build tsuru python image
+# to run it:
+# 1- install docker
+# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile
+
+from base:ubuntu-quantal
+run apt-get install wget -y --force-yes
+run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate
+run mkdir /var/lib/tsuru
+run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1
+run cp /var/lib/tsuru/python/deploy /var/lib/tsuru
+run cp /var/lib/tsuru/base/restart /var/lib/tsuru
+run cp /var/lib/tsuru/base/start /var/lib/tsuru
+run /var/lib/tsuru/base/install
+run /var/lib/tsuru/base/setup
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem
new file mode 100644
index 0000000..8e38bba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC1TCCAb+gAwIBAgIQJ9MsNxrUxumNbAytGi3GEDALBgkqhkiG9w0BAQswFjEU
+MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTM4WhcNMTcwOTMwMjAy
+MTM4WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBALpFCSARjG+5yXoqr7UMzuE0df7RRZfeRZI06lJ02ZqV4Iii
+rgL7ML9yPxX50NbLnjiilSDTUhnyocYFItokzUzz8qpX/nlYhuN2Iqwh4d0aWS8z
+f5y248F+H1z+HY2W8NPl/6DVlVwYaNW1/k+RPMlHS0INLR6j+3Ievew7RNE0NnM2
+znELW6NetekDt3GUcz0Z95vDUDfdPnIk1eIFMmYvLxZh23xOca4Q37a3S8F3d+dN
++OOpwjdgY9Qme0NQUaXpgp58jWuQfB8q7mZrdnLlLqRa8gx1HeDSotX7UmWtWPkb
+vd9EdlKLYw5PVpxMV1rkwf2t4TdgD5NfkpXlXkkCAwEAAaMjMCEwDgYDVR0PAQH/
+BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4IBAQBxYjHVSKqE
+MJw7CW0GddesULtXXVWGJuZdWJLQlPvPMfIfjIvlcZyS4cdVNiQ3sREFIZz8TpII
+CT0/Pg3sgv/FcOQe1CN0xZYZcyiAZHK1z0fJQq2qVpdv7+tJcjI2vvU6NI24iQCo
+W1wz25trJz9QbdB2MRLMjyz7TSWuafztIvcfEzaIdQ0Whqund/cSuPGQx5IwF83F
+rvlkOyJSH2+VIEBTCIuykJeL0DLTt8cePBQR5L1ISXb4RUMK9ZtqRscBRv8sn7o2
+ixG3wtL0gYF4xLtsQWVxI3iFVrU3WzOH/3c5shVRkWBd+AQRSwCJI4mKH7penJCF
+i3/zzlkvOnjV
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem
new file mode 100644
index 0000000..5e7244b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC6DCCAdKgAwIBAgIRANO6ymxQAjp66KmEka1G6b0wCwYJKoZIhvcNAQELMBYx
+FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MTAxNjIwMjE1MloXDTE3MDkzMDIw
+MjE1MlowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDGA1mAhSOpZspD1dpZ7qVEQrIJw4Xo8252jHaORnEdDiFm
+b6brEmr6jw8t4P3IGxbqBc/TqRV+SSXxwYEVvfpeQKH+SmqStoMNtD3Ura161az4
+V0BcxMtSlsUGpoz+//QCAq8qiaxMwgiyc5253mkQm88anj2cNt7xbewiu/KFWuf7
+BVpNK1+ltpJmlukfcj/G+I1bw7j1KxBjDrFqe5cyDuuZcDL2tmUXP/ZWDyXwSv+H
+AOckqn44z6aXlBkVvOXDBZJqY76d/vWVDNCuZeXRnqlhP3t1kH4V0RQXo+JD2tgt
+JgdU0unzyoFOSWNUBPm73tqmjUGGAmGHBmeegJr/AgMBAAGjNTAzMA4GA1UdDwEB
+/wQEAwIAgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMAsGCSqG
+SIb3DQEBCwOCAQEABVTWl5SmBP+j5He5bQsgnIXjviSKqe40/10V4LJAOmilycRF
+zLrzM+YMwfjg6PLIs8CldAMWHw9y9ktZY4MxkgCktaiaN/QmMTMwFWEcN4wy5IpM
+U5l93eAg7xsnY430h3QBBADujX4wdF3fs8rSL8zAAQFL0ihurwU124K3yXKsrwpb
+CiVUGfIN4sPwjy8Ws9oxHFDC9/P8lgjHZ1nBIf8KSHnMzlxDGj7isQfhtH+7mcCL
+cM1qO2NirS2v7uaEPPY+MJstAz+W7EJCW9dfMSmHna2SDC37Xkin7uEY9z+qaKFL
+8d/XxOB/L8Ucy8VZhdsv0dsBq5KfJntITM0ksQ==
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar
new file mode 100644
index 0000000..e4b066e
Binary files /dev/null and b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar differ
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar
new file mode 100644
index 0000000..32c9ce6
Binary files /dev/null and b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar differ
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem
new file mode 100644
index 0000000..a9346bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAxgNZgIUjqWbKQ9XaWe6lREKyCcOF6PNudox2jkZxHQ4hZm+m
+6xJq+o8PLeD9yBsW6gXP06kVfkkl8cGBFb36XkCh/kpqkraDDbQ91K2tetWs+FdA
+XMTLUpbFBqaM/v/0AgKvKomsTMIIsnOdud5pEJvPGp49nDbe8W3sIrvyhVrn+wVa
+TStfpbaSZpbpH3I/xviNW8O49SsQYw6xanuXMg7rmXAy9rZlFz/2Vg8l8Er/hwDn
+JKp+OM+ml5QZFbzlwwWSamO+nf71lQzQrmXl0Z6pYT97dZB+FdEUF6PiQ9rYLSYH
+VNLp88qBTkljVAT5u97apo1BhgJhhwZnnoCa/wIDAQABAoIBAQCaGy9EC9pmU95l
+DwGh7k5nIrUnTilg1FwLHWSDdCVCZKXv8ENrPelOWZqJrUo1u4eI2L8XTsewgkNq
+tJu/DRzWz9yDaO0qg6rZNobMh+K076lvmZA44twOydJLS8H+D7ua+PXU2FLlZjmY
+kMyXRJZmW6zCXZc7haTbJx6ZJccoquk/DkS4FcFurJP177u1YrWS9TTw9kensUtU
+jQ63uf56UTN1i+0+Rxl7OW1TZlqwlri5I4njg5249+FxwwHzIq8+l7zD7K9pl8c/
+nG1HuulvU2bVlDlRdyslMPAH34vw9Sku1BD8furrJLr1na5lRSLKJODEaIPEsLwv
+CdEUwP9JAoGBAO76ZW80RyNB2fA+wbTq70Sr8CwrXxYemXrez5LKDC7SsohKFCPE
+IedpO/n+nmymiiJvMm874EExoG6BVrbkWkeb+2vinEfOQNlDMsDx7WLjPekP3t6i
+rXHO3CjFooVFq2z3mZa/Nc5NZqu8fNWNCKJxZDJphdoj6sORNJIUvZVjAoGBANQd
+++J+ITcu3/+A6JrGcgLunBFQYPqkiItk0J4QKYKuX5ik9rWcQDN8TTtfW2mDuiQ4
+NrCwuVPq1V1kB16JzH017SsYLo9g8I20YjnBZge9pKTeUaLVTb3C50LW8FBylop0
+Bnm597dNbtSjphjoTMg0XyC19o3Esf2YeWG0QNS1AoGAWWDfFRNJU99qIldmXULM
+0DM6NVrXSk+ReYnhunXEzrJQwXZrR+EwCPurydk36Uz0NuK9yypquhdUeF/5TZfk
+SAoHo5byekyipl9imRUigqyY2BTudvgCxKDoaHtaSFwBPFTyZZYICquaLbrmOXxw
+8UhVgCFFRYvPXuts7QHC0h8CgYBWEvy9gfU0kV7wLX02IUTuj6jhFb7ktpN6DSTi
+nyhZES1VoctDEu6ydcRZTW6ouH12aSE4Pd5WgTqntQmQgVZrkNB25k8ue2Xh+srJ
+KQOgLIJ9LIHwE6KCWG7DnrjRzE3uTPq7to0g4tkQjH/AJ7PQof/gJDayfJjFkXPg
+A+cy6QKBgEPbKpiqscm03gT2QanBut5pg4dqPOxp0SlErA3kSFNTRK3oYBQPC+LH
+qA5nD5brdkeNBB58Rll8Zpzxiff50bcvLP/7/Sb3NjaXFTEY0gVbdRof3n6N0YP3
+Hu5XDNJ9RNkNzE5RIG1g86KE+aKlcrKMaigqAiuIy2PSnjkQeGk8
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem
new file mode 100644
index 0000000..89cc445
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC/DCCAeagAwIBAgIQMUILcXtvmSOK63zEBo0VXzALBgkqhkiG9w0BAQswFjEU
+MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTQ2WhcNMTcwOTMwMjAy
+MTQ2WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBANxUOUhNnqFnrTlLsBYzfFRZWQo268l+4K4lOJCVbfDonP3g
+Mz0vGi9fcyFqEWSA8Y+ShXna625HTnReCwFdsu0861qCIq7v95hFFCyOe0iIxpd0
+AKLnl90d+1vonE7andgFgoobbTiMly4UK4H6z8D148fFNIihoteOG3PIF89TFxP7
+CJ/3wXnx/IKpdlO8PAnub3tBPJHvGDj7KORLy4IBxRX5VBAdfGNybE66fcrehEva
+rLA4m9pgiaR/Nnr9FdKhPyqYdjflLNvzydxNvMIV4M0hFlhXmYvpMjA5/XsTnsyV
+t9JHJa5Upwqsbne08t7rsm7liZNxZlko8xPOTQcCAwEAAaNKMEgwDgYDVR0PAQH/
+BAQDAgCgMAwGA1UdEwEB/wQCMAAwKAYDVR0RBCEwH4ILYm9vdDJkb2NrZXKHBH8A
+AAGHBAoAAg+HBMCoO2cwCwYJKoZIhvcNAQELA4IBAQAYoYcDkDWkl73FZ0WnPmAj
+LiF7HU95Qg3KyEpFsAJeShSLPPbQntmwhdekEzY4tQ3eKQB/+zHFjzsCr/lmDUmH
+Ea/ryQ17C+jyH+Ykg0IWW6L6veZhvRDg6Z9focVtPVBRxPTqC/Qhb54blWRASV+W
+UreMuXQ5+1dQptAM7ixOeLVHjBi/bd9TL3jvwBVCr9QedteMjjK4TCF9Tbcou+MF
+2w3OJJZMDhcD+YwoK9uJDqlKmcTm/vVMbSsp/pTMcnQ7jxCeR8/XyX+VwTZwaHAa
+o92Q/eg3THAiWhvyT/SzyH9dHHBAyXynUwGCggKawHktfvW4QXRPuLxLrJ7iB5cy
+-----END CERTIFICATE-----
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
new file mode 100644
index 0000000..c897e5d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEoAIBAAKCAQEA3FQ5SE2eoWetOUuwFjN8VFlZCjbryX7griU4kJVt8Oic/eAz
+PS8aL19zIWoRZIDxj5KFedrrbkdOdF4LAV2y7TzrWoIiru/3mEUULI57SIjGl3QA
+oueX3R37W+icTtqd2AWCihttOIyXLhQrgfrPwPXjx8U0iKGi144bc8gXz1MXE/sI
+n/fBefH8gql2U7w8Ce5ve0E8ke8YOPso5EvLggHFFflUEB18Y3JsTrp9yt6ES9qs
+sDib2mCJpH82ev0V0qE/Kph2N+Us2/PJ3E28whXgzSEWWFeZi+kyMDn9exOezJW3
+0kclrlSnCqxud7Ty3uuybuWJk3FmWSjzE85NBwIDAQABAoIBAG0ak+cW8LeShHf7
+3+2Of0GxoOLrAWWdG5uAuPr31CJYve0FybnBimDtDjD8ujIfm/7xmoEWBEFutA3x
+x9dcU88gvJbsHEqub9gKVQwfXjMz78tt2SbSMiR/xUnk7QorPcCMMfE71aEMFYzu
+1gCed6Rg3vO81t/V0rKVH0j9S7UQz5v/oX15eVDV5LOqyCHwAi6K0eXXbqnbI0TH
+SOQ/nexM2msVXWbO9t6ra6f5V7FXziDK5Xi+rPxRbX9mkrDzxDAevfuRqYBx5vtL
+W2Q2hKjUAHFgXFniNSZBS7dCdAtz0el/3ct+cNmpuTMhhs7M6wC1CuYiZ/DxLiFh
+Si73VckCgYEA+/ceh3+VjtQ0rgEw8sD9bqYEA8IaBiObjneIoFnKBYRG7yZd8JMm
+HD4M/aQ1qhcRLPN7GR03YQULgQJURbKSjJHnhfTXHyeHC3NN4gMVHQXewu2MHCh6
+7FCQ9CfK0KcYLgegVVvL3PrF3hyWGnmTu+G0UkDQRYVnaNrB7snrW6UCgYEA39tq
++MCQdu0moJ5szSZf02undg9EeW6isk9qzi7TId3/MLci2eH7PEnipipPUK3+DERq
+aba0y0TKgBR2EXvXLFJA/+kfdo2loIEHOfox85HVfxgUaFRti63ZI0uF8D0QT2Yy
+oJal+RFghVoSnv4LjhRKEPbIkScTXGjdK+7wFjsCfz79iKRXQQx0ALd/lL0bgkAn
+QNmvrNHcFQeI2p8700WNzC39aX67SsvEt3qxkrjzC1gxhpTAuReIK1gVPPwvqHN8
+BmV20FD5kMlMCix2mNCopwgUWvKvLAvoGFTxncKMA39+aJbuXAjiqJTekKgNvOE7
+i9kEWw0GTNPp3JHV6QECgYAPwb0M11kT1euDIMOdyRazpf86kyaJuZzgGjD1ZFxe
+JOcigbGFTp/FhZnbglzk2+pm6KXo3QBq0mPCki4hWusxZnTGzpz1VlETNCHTFeZQ
+M7KoaIR/N3oie9Et59H8r/+m5xWnMhNqratyl316DX24uXrhKM3DUdHODl+LCR2D
+IwKBgE1MbHuwolUPEw3HeO4R7NMFVTFei7E/fpUsimPfArGg8UydwvloNT1myJos
+N2JzfGGjN2KPVcBk9fOs71mJ6VcK3C3g5JIccplk6h9VNaw55+zdQvKPTzoBoTvy
+A+Fwx2AlF61KeRF87DL2YTRJ6B9MHmWgf7+GVZOxomLgEAcZ
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
new file mode 100644
index 0000000..5052e2d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
@@ -0,0 +1,1096 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides a fake implementation of the Docker API, useful
+// for testing purposes.
+package testing
+
+import (
+ "archive/tar"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ mathrand "math/rand"
+ "net"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/fsouza/go-dockerclient"
+ "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
+ "github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux"
+)
+
+var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)
+
+// DockerServer represents a programmable, concurrent (not much), HTTP server
+// implementing a fake version of the Docker remote API.
+//
+// It can be used in standalone mode, listening for connections, or as an
+// arbitrary HTTP handler.
+//
+// For more details on the remote API, check http://goo.gl/G3plxW.
+type DockerServer struct {
+ containers []*docker.Container
+ execs []*docker.ExecInspect
+ execMut sync.RWMutex
+ cMut sync.RWMutex
+ images []docker.Image
+ iMut sync.RWMutex
+ imgIDs map[string]string
+ networks []*docker.Network
+ netMut sync.RWMutex
+ listener net.Listener
+ mux *mux.Router
+ hook func(*http.Request)
+ failures map[string]string
+ multiFailures []map[string]string
+ execCallbacks map[string]func()
+ statsCallbacks map[string]func(string) docker.Stats
+ customHandlers map[string]http.Handler
+ handlerMutex sync.RWMutex
+ cChan chan<- *docker.Container
+}
+
+// NewServer returns a new instance of the fake server, in standalone mode. Use
+// the method URL to get the URL of the server.
+//
+// It receives the bind address (use 127.0.0.1:0 to get an available port on
+// the host), a channel of containers, and a hook function that will be
+// called on every request.
+//
+// The fake server will send containers in the channel whenever the container
+// changes its state, via the HTTP API (i.e.: create, start and stop). This
+// channel may be nil, which means that the server won't notify on state
+// changes.
+func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*http.Request)) (*DockerServer, error) {
+ listener, err := net.Listen("tcp", bind)
+ if err != nil {
+ return nil, err
+ }
+ server := DockerServer{
+ listener: listener,
+ imgIDs: make(map[string]string),
+ hook: hook,
+ failures: make(map[string]string),
+ execCallbacks: make(map[string]func()),
+ statsCallbacks: make(map[string]func(string) docker.Stats),
+ customHandlers: make(map[string]http.Handler),
+ cChan: containerChan,
+ }
+ server.buildMuxer()
+ go http.Serve(listener, &server)
+ return &server, nil
+}
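+
+// A minimal sketch of pointing a client at the fake server (illustrative
+// only):
+//
+// server, err := testing.NewServer("127.0.0.1:0", nil, nil)
+// if err != nil {
+// // handle error
+// }
+// defer server.Stop()
+// client, err := docker.NewClient(server.URL())
+// // client now talks to the fake daemon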
+
+func (s *DockerServer) notify(container *docker.Container) {
+ if s.cChan != nil {
+ s.cChan <- container
+ }
+}
+
+func (s *DockerServer) buildMuxer() {
+ s.mux = mux.NewRouter()
+ s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer))
+ s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers))
+ s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer))
+ s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer))
+ s.mux.Path("/containers/{id:.*}/rename").Methods("POST").HandlerFunc(s.handlerWrapper(s.renameContainer))
+ s.mux.Path("/containers/{id:.*}/top").Methods("GET").HandlerFunc(s.handlerWrapper(s.topContainer))
+ s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer))
+ s.mux.Path("/containers/{id:.*}/kill").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer))
+ s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer))
+ s.mux.Path("/containers/{id:.*}/pause").Methods("POST").HandlerFunc(s.handlerWrapper(s.pauseContainer))
+ s.mux.Path("/containers/{id:.*}/unpause").Methods("POST").HandlerFunc(s.handlerWrapper(s.unpauseContainer))
+ s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer))
+ s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer))
+ s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer))
+ s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer))
+ s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer))
+ s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer))
+ s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer))
+ s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer))
+ s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage))
+ s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage))
+ s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages))
+ s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage))
+ s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage))
+ s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage))
+ s.mux.Path("/images/{name:.*}/tag").Methods("POST").HandlerFunc(s.handlerWrapper(s.tagImage))
+ s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents)
+ s.mux.Path("/_ping").Methods("GET").HandlerFunc(s.handlerWrapper(s.pingDocker))
+ s.mux.Path("/images/load").Methods("POST").HandlerFunc(s.handlerWrapper(s.loadImage))
+ s.mux.Path("/images/{id:.*}/get").Methods("GET").HandlerFunc(s.handlerWrapper(s.getImage))
+ s.mux.Path("/networks").Methods("GET").HandlerFunc(s.handlerWrapper(s.listNetworks))
+ s.mux.Path("/networks/{id:.*}").Methods("GET").HandlerFunc(s.handlerWrapper(s.networkInfo))
+ s.mux.Path("/networks").Methods("POST").HandlerFunc(s.handlerWrapper(s.createNetwork))
+}
+
+// SetHook changes the hook function used by the server.
+//
+// The hook function is a function called on every request.
+func (s *DockerServer) SetHook(hook func(*http.Request)) {
+ s.hook = hook
+}
+
+// PrepareExec adds a callback to a container exec in the fake server.
+//
+// This function will be called whenever the given exec id is started, and the
+// given exec id will remain in the "Running" state while the function is
+// running, so it's useful for emulating an exec that runs for two seconds, for
+// example:
+//
+// opts := docker.CreateExecOptions{
+// AttachStdin: true,
+// AttachStdout: true,
+// AttachStderr: true,
+// Tty: true,
+// Cmd: []string{"/bin/bash", "-l"},
+// }
+// // Client points to a fake server.
+// exec, err := client.CreateExec(opts)
+// // handle error
+// server.PrepareExec(exec.ID, func() {time.Sleep(2 * time.Second)})
+// err = client.StartExec(exec.ID, docker.StartExecOptions{Tty: true}) // will block for 2 seconds
+// // handle error
+func (s *DockerServer) PrepareExec(id string, callback func()) {
+ s.execCallbacks[id] = callback
+}
+
+// PrepareStats adds a callback that will be called for each container stats
+// call.
+//
+// This callback function will be called multiple times if stream is set to
+// true when stats is called.
+func (s *DockerServer) PrepareStats(id string, callback func(string) docker.Stats) {
+ s.statsCallbacks[id] = callback
+}
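+
+// A sketch of faking stats for a container (illustrative only; the field
+// path MemoryStats.Usage is an assumption about docker.Stats):
+//
+// server.PrepareStats(containerID, func(id string) docker.Stats {
+// var stats docker.Stats
+// stats.MemoryStats.Usage = 1 << 20 // pretend 1MiB in use
+// return stats
+// })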
+
+// PrepareFailure adds a new expected failure based on a URL regexp; it
+// receives an id for the failure.
+func (s *DockerServer) PrepareFailure(id string, urlRegexp string) {
+ s.failures[id] = urlRegexp
+}
+
+// PrepareMultiFailures enqueues a new expected failure based on a URL
+// regexp; it receives an id for the failure.
+func (s *DockerServer) PrepareMultiFailures(id string, urlRegexp string) {
+ s.multiFailures = append(s.multiFailures, map[string]string{"error": id, "url": urlRegexp})
+}
+
+// ResetFailure removes an expected failure identified by the given id.
+func (s *DockerServer) ResetFailure(id string) {
+ delete(s.failures, id)
+}
+
+// ResetMultiFailures removes all enqueued failures.
+func (s *DockerServer) ResetMultiFailures() {
+ s.multiFailures = []map[string]string{}
+}
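+
+// A sketch of forcing failures on a route (illustrative only):
+//
+// server.PrepareFailure("server on fire", "/containers/json")
+// // every request matching the regexp now fails with the id as message
+// server.ResetFailure("server on fire")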
+
+// CustomHandler registers a custom handler for a specific path.
+//
+// For example:
+//
+// server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// http.Error(w, "Something wrong is not right", http.StatusInternalServerError)
+// }))
+func (s *DockerServer) CustomHandler(path string, handler http.Handler) {
+ s.handlerMutex.Lock()
+ s.customHandlers[path] = handler
+ s.handlerMutex.Unlock()
+}
+
+// MutateContainer changes the state of a container, returning an error if
+// the given id does not match any container "running" in the server.
+func (s *DockerServer) MutateContainer(id string, state docker.State) error {
+ for _, container := range s.containers {
+ if container.ID == id {
+ container.State = state
+ return nil
+ }
+ }
+ return errors.New("container not found")
+}
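+
+// A sketch of simulating a container exit (illustrative only; containerID
+// is assumed):
+//
+// err := server.MutateContainer(containerID, docker.State{
+// Running: false,
+// ExitCode: 1,
+// })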
+
+// Stop stops the server.
+func (s *DockerServer) Stop() {
+ if s.listener != nil {
+ s.listener.Close()
+ }
+}
+
+// URL returns the HTTP URL of the server.
+func (s *DockerServer) URL() string {
+ if s.listener == nil {
+ return ""
+ }
+ return "http://" + s.listener.Addr().String() + "/"
+}
+
+// ServeHTTP handles HTTP requests sent to the server.
+func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ s.handlerMutex.RLock()
+ defer s.handlerMutex.RUnlock()
+ for re, handler := range s.customHandlers {
+ if m, _ := regexp.MatchString(re, r.URL.Path); m {
+ handler.ServeHTTP(w, r)
+ return
+ }
+ }
+ s.mux.ServeHTTP(w, r)
+ if s.hook != nil {
+ s.hook(r)
+ }
+}
+
+// DefaultHandler returns the default http.Handler mux; it allows custom
+// handlers to fall back to the default behavior when needed.
+func (s *DockerServer) DefaultHandler() http.Handler {
+ return s.mux
+}
+
+func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ for errorID, urlRegexp := range s.failures {
+ matched, err := regexp.MatchString(urlRegexp, r.URL.Path)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ if !matched {
+ continue
+ }
+ http.Error(w, errorID, http.StatusBadRequest)
+ return
+ }
+ for i, failure := range s.multiFailures {
+ matched, err := regexp.MatchString(failure["url"], r.URL.Path)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ if !matched {
+ continue
+ }
+ http.Error(w, failure["error"], http.StatusBadRequest)
+ s.multiFailures = append(s.multiFailures[:i], s.multiFailures[i+1:]...)
+ return
+ }
+ f(w, r)
+ }
+}
+
+func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) {
+ all := r.URL.Query().Get("all")
+ s.cMut.RLock()
+ result := make([]docker.APIContainers, 0, len(s.containers))
+ for _, container := range s.containers {
+ if all == "1" || container.State.Running {
+ result = append(result, docker.APIContainers{
+ ID: container.ID,
+ Image: container.Image,
+ Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")),
+ Created: container.Created.Unix(),
+ Status: container.State.String(),
+ Ports: container.NetworkSettings.PortMappingAPI(),
+ Names: []string{fmt.Sprintf("/%s", container.Name)},
+ })
+ }
+ }
+ s.cMut.RUnlock()
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) {
+ // images are guarded by iMut, not cMut
+ s.iMut.RLock()
+ result := make([]docker.APIImages, len(s.images))
+ for i, image := range s.images {
+ result[i] = docker.APIImages{
+ ID: image.ID,
+ Created: image.Created.Unix(),
+ }
+ for tag, id := range s.imgIDs {
+ if id == image.ID {
+ result[i].RepoTags = append(result[i].RepoTags, tag)
+ }
+ }
+ }
+ s.iMut.RUnlock()
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) findImage(id string) (string, error) {
+ s.iMut.RLock()
+ defer s.iMut.RUnlock()
+ image, ok := s.imgIDs[id]
+ if ok {
+ return image, nil
+ }
+ image, _, err := s.findImageByID(id)
+ return image, err
+}
+
+func (s *DockerServer) findImageByID(id string) (string, int, error) {
+ s.iMut.RLock()
+ defer s.iMut.RUnlock()
+ for i, image := range s.images {
+ if image.ID == id {
+ return image.ID, i, nil
+ }
+ }
+ return "", -1, errors.New("No such image")
+}
+
+func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) {
+ var config struct {
+ *docker.Config
+ HostConfig *docker.HostConfig
+ }
+ defer r.Body.Close()
+ err := json.NewDecoder(r.Body).Decode(&config)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ name := r.URL.Query().Get("name")
+ if name != "" && !nameRegexp.MatchString(name) {
+ http.Error(w, "Invalid container name", http.StatusInternalServerError)
+ return
+ }
+ if _, err := s.findImage(config.Image); err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ ports := map[docker.Port][]docker.PortBinding{}
+ for port := range config.ExposedPorts {
+ ports[port] = []docker.PortBinding{{
+ HostIP: "0.0.0.0",
+ HostPort: strconv.Itoa(mathrand.Int() % 65536),
+ }}
+ }
+
+ // the container may not have a cmd when it is built from a Dockerfile
+ var path string
+ var args []string
+ if len(config.Cmd) == 1 {
+ path = config.Cmd[0]
+ } else if len(config.Cmd) > 1 {
+ path = config.Cmd[0]
+ args = config.Cmd[1:]
+ }
+
+ generatedID := s.generateID()
+ config.Config.Hostname = generatedID[:12]
+ container := docker.Container{
+ Name: name,
+ ID: generatedID,
+ Created: time.Now(),
+ Path: path,
+ Args: args,
+ Config: config.Config,
+ HostConfig: config.HostConfig,
+ State: docker.State{
+ Running: false,
+ Pid: mathrand.Int() % 50000,
+ ExitCode: 0,
+ StartedAt: time.Now(),
+ },
+ Image: config.Image,
+ NetworkSettings: &docker.NetworkSettings{
+ IPAddress: fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2),
+ IPPrefixLen: 24,
+ Gateway: "172.16.42.1",
+ Bridge: "docker0",
+ Ports: ports,
+ },
+ }
+ s.cMut.Lock()
+ if container.Name != "" {
+ for _, c := range s.containers {
+ if c.Name == container.Name {
+ defer s.cMut.Unlock()
+ http.Error(w, "there's already a container with this name", http.StatusConflict)
+ return
+ }
+ }
+ }
+ s.containers = append(s.containers, &container)
+ s.cMut.Unlock()
+ w.WriteHeader(http.StatusCreated)
+ s.notify(&container)
+ var c = struct{ ID string }{ID: container.ID}
+ json.NewEncoder(w).Encode(c)
+}
+
+func (s *DockerServer) generateID() string {
+ var buf [16]byte
+ rand.Read(buf[:])
+ return fmt.Sprintf("%x", buf)
+}
+
+func (s *DockerServer) renameContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, index, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ copy := *container
+ copy.Name = r.URL.Query().Get("name")
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ if s.containers[index].ID == copy.ID {
+ s.containers[index] = &copy
+ }
+ w.WriteHeader(http.StatusNoContent)
+}
+
+func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(container)
+}
+
+func (s *DockerServer) statsContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ _, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ stream, _ := strconv.ParseBool(r.URL.Query().Get("stream"))
+ callback := s.statsCallbacks[id]
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ encoder := json.NewEncoder(w)
+ for {
+ var stats docker.Stats
+ if callback != nil {
+ stats = callback(id)
+ }
+ encoder.Encode(stats)
+ if !stream {
+ break
+ }
+ }
+}
+
+func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ if !container.State.Running {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "Container %s is not running", id)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ result := docker.TopResult{
+ Titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
+ Processes: [][]string{
+ {"root", "7535", "7516", "0", "03:20", "?", "00:00:00", container.Path + " " + strings.Join(container.Args, " ")},
+ },
+ }
+ json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ defer r.Body.Close()
+ var hostConfig docker.HostConfig
+ err = json.NewDecoder(r.Body).Decode(&hostConfig)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ container.HostConfig = &hostConfig
+ if container.State.Running {
+ http.Error(w, "", http.StatusNotModified)
+ return
+ }
+ container.State.Running = true
+ s.notify(container)
+}
+
+func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ if !container.State.Running {
+ http.Error(w, "Container not running", http.StatusBadRequest)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ container.State.Running = false
+ s.notify(container)
+}
+
+func (s *DockerServer) pauseContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ if container.State.Paused {
+ http.Error(w, "Container already paused", http.StatusBadRequest)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ container.State.Paused = true
+}
+
+func (s *DockerServer) unpauseContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ if !container.State.Paused {
+ http.Error(w, "Container not paused", http.StatusBadRequest)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ container.State.Paused = false
+}
+
+func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ hijacker, ok := w.(http.Hijacker)
+ if !ok {
+ http.Error(w, "cannot hijack connection", http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/vnd.docker.raw-stream")
+ w.WriteHeader(http.StatusOK)
+ conn, _, err := hijacker.Hijack()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ wg := sync.WaitGroup{}
+ if r.URL.Query().Get("stdin") == "1" {
+ wg.Add(1)
+ go func() {
+ ioutil.ReadAll(conn)
+ wg.Done()
+ }()
+ }
+ outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout)
+ if container.State.Running {
+ fmt.Fprintf(outStream, "Container is running\n")
+ } else {
+ fmt.Fprintf(outStream, "Container is not running\n")
+ }
+ fmt.Fprintln(outStream, "What happened?")
+ fmt.Fprintln(outStream, "Something happened")
+ wg.Wait()
+ if r.URL.Query().Get("stream") == "1" {
+ for {
+ time.Sleep(time.Millisecond)
+ s.cMut.RLock()
+ if !container.State.Running {
+ s.cMut.RUnlock()
+ break
+ }
+ s.cMut.RUnlock()
+ }
+ }
+ conn.Close()
+}
+
+func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ for {
+ time.Sleep(time.Millisecond)
+ s.cMut.RLock()
+ if !container.State.Running {
+ s.cMut.RUnlock()
+ break
+ }
+ s.cMut.RUnlock()
+ }
+ result := map[string]int{"StatusCode": container.State.ExitCode}
+ json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ force := r.URL.Query().Get("force")
+ _, index, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ if s.containers[index].State.Running && force != "1" {
+ msg := "Error: API error (406): Impossible to remove a running container, please stop it first"
+ http.Error(w, msg, http.StatusInternalServerError)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ s.containers[index] = s.containers[len(s.containers)-1]
+ s.containers = s.containers[:len(s.containers)-1]
+}
+
+func (s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) {
+ id := r.URL.Query().Get("container")
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ var config *docker.Config
+ runConfig := r.URL.Query().Get("run")
+ if runConfig != "" {
+ config = new(docker.Config)
+ err = json.Unmarshal([]byte(runConfig), config)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ }
+ w.WriteHeader(http.StatusOK)
+ image := docker.Image{
+ ID: "img-" + container.ID,
+ Parent: container.Image,
+ Container: container.ID,
+ Comment: r.URL.Query().Get("m"),
+ Author: r.URL.Query().Get("author"),
+ Config: config,
+ }
+ repository := r.URL.Query().Get("repo")
+ tag := r.URL.Query().Get("tag")
+ s.iMut.Lock()
+ s.images = append(s.images, image)
+ if repository != "" {
+ if tag != "" {
+ repository += ":" + tag
+ }
+ s.imgIDs[repository] = image.ID
+ }
+ s.iMut.Unlock()
+ fmt.Fprintf(w, `{"ID":%q}`, image.ID)
+}
+
+func (s *DockerServer) findContainer(idOrName string) (*docker.Container, int, error) {
+ s.cMut.RLock()
+ defer s.cMut.RUnlock()
+ for i, container := range s.containers {
+ if container.ID == idOrName || container.Name == idOrName {
+ return container, i, nil
+ }
+ }
+ return nil, -1, errors.New("No such container")
+}
+
+func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) {
+ if ct := r.Header.Get("Content-Type"); ct == "application/tar" {
+ gotDockerFile := false
+ tr := tar.NewReader(r.Body)
+ for {
+ header, err := tr.Next()
+ if err != nil {
+ break
+ }
+ if header.Name == "Dockerfile" {
+ gotDockerFile = true
+ }
+ }
+ if !gotDockerFile {
+ w.WriteHeader(http.StatusBadRequest)
+ w.Write([]byte("miss Dockerfile"))
+ return
+ }
+ }
+ // we do not actually use the Dockerfile to build an image, because we are a fake Docker daemon
+ image := docker.Image{
+ ID: s.generateID(),
+ Created: time.Now(),
+ }
+
+ query := r.URL.Query()
+ repository := image.ID
+ if t := query.Get("t"); t != "" {
+ repository = t
+ }
+ s.iMut.Lock()
+ s.images = append(s.images, image)
+ s.imgIDs[repository] = image.ID
+ s.iMut.Unlock()
+ w.Write([]byte(fmt.Sprintf("Successfully built %s", image.ID)))
+}
+
+func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) {
+ fromImageName := r.URL.Query().Get("fromImage")
+ tag := r.URL.Query().Get("tag")
+ image := docker.Image{
+ ID: s.generateID(),
+ }
+ s.iMut.Lock()
+ s.images = append(s.images, image)
+ if fromImageName != "" {
+ if tag != "" {
+ fromImageName = fmt.Sprintf("%s:%s", fromImageName, tag)
+ }
+ s.imgIDs[fromImageName] = image.ID
+ }
+ s.iMut.Unlock()
+}
+
+func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) {
+ name := mux.Vars(r)["name"]
+ tag := r.URL.Query().Get("tag")
+ if tag != "" {
+ name += ":" + tag
+ }
+ s.iMut.RLock()
+ if _, ok := s.imgIDs[name]; !ok {
+ s.iMut.RUnlock()
+ http.Error(w, "No such image", http.StatusNotFound)
+ return
+ }
+ s.iMut.RUnlock()
+ fmt.Fprintln(w, "Pushing...")
+ fmt.Fprintln(w, "Pushed")
+}
+
+func (s *DockerServer) tagImage(w http.ResponseWriter, r *http.Request) {
+ name := mux.Vars(r)["name"]
+ s.iMut.RLock()
+ if _, ok := s.imgIDs[name]; !ok {
+ s.iMut.RUnlock()
+ http.Error(w, "No such image", http.StatusNotFound)
+ return
+ }
+ s.iMut.RUnlock()
+ s.iMut.Lock()
+ defer s.iMut.Unlock()
+ newRepo := r.URL.Query().Get("repo")
+ newTag := r.URL.Query().Get("tag")
+ if newTag != "" {
+ newRepo += ":" + newTag
+ }
+ s.imgIDs[newRepo] = s.imgIDs[name]
+ w.WriteHeader(http.StatusCreated)
+}
+
+func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ s.iMut.RLock()
+ var tag string
+ if img, ok := s.imgIDs[id]; ok {
+ id, tag = img, id
+ }
+ var tags []string
+ for tag, taggedID := range s.imgIDs {
+ if taggedID == id {
+ tags = append(tags, tag)
+ }
+ }
+ s.iMut.RUnlock()
+ _, index, err := s.findImageByID(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ s.iMut.Lock()
+ defer s.iMut.Unlock()
+ if len(tags) < 2 {
+ s.images[index] = s.images[len(s.images)-1]
+ s.images = s.images[:len(s.images)-1]
+ }
+ if tag != "" {
+ delete(s.imgIDs, tag)
+ }
+}
+
+func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) {
+ name := mux.Vars(r)["name"]
+ s.iMut.RLock()
+ defer s.iMut.RUnlock()
+ if id, ok := s.imgIDs[name]; ok {
+ for _, img := range s.images {
+ if img.ID == id {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(img)
+ return
+ }
+ }
+ }
+ http.Error(w, "not found", http.StatusNotFound)
+}
+
+func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ var events [][]byte
+ count := mathrand.Intn(20)
+ for i := 0; i < count; i++ {
+ data, err := json.Marshal(s.generateEvent())
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ events = append(events, data)
+ }
+ w.WriteHeader(http.StatusOK)
+ for _, d := range events {
+ fmt.Fprintln(w, d)
+ time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond)
+ }
+}
+
+func (s *DockerServer) pingDocker(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+}
+
+func (s *DockerServer) generateEvent() *docker.APIEvents {
+ var eventType string
+ switch mathrand.Intn(4) {
+ case 0:
+ eventType = "create"
+ case 1:
+ eventType = "start"
+ case 2:
+ eventType = "stop"
+ case 3:
+ eventType = "destroy"
+ }
+ return &docker.APIEvents{
+ ID: s.generateID(),
+ Status: eventType,
+ From: "mybase:latest",
+ Time: time.Now().Unix(),
+ }
+}
+
+func (s *DockerServer) loadImage(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+}
+
+func (s *DockerServer) getImage(w http.ResponseWriter, r *http.Request) {
+ // headers must be set before WriteHeader, otherwise they are ignored
+ w.Header().Set("Content-Type", "application/tar")
+ w.WriteHeader(http.StatusOK)
+}
+
+func (s *DockerServer) createExecContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+
+ execID := s.generateID()
+ container.ExecIDs = append(container.ExecIDs, execID)
+
+ exec := docker.ExecInspect{
+ ID: execID,
+ Container: *container,
+ }
+
+ var params docker.CreateExecOptions
+ err = json.NewDecoder(r.Body).Decode(&params)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if len(params.Cmd) > 0 {
+ exec.ProcessConfig.EntryPoint = params.Cmd[0]
+ if len(params.Cmd) > 1 {
+ exec.ProcessConfig.Arguments = params.Cmd[1:]
+ }
+ }
+
+ exec.ProcessConfig.User = params.User
+ exec.ProcessConfig.Tty = params.Tty
+
+ s.execMut.Lock()
+ s.execs = append(s.execs, &exec)
+ s.execMut.Unlock()
+ // set the Content-Type header before writing the status, or it is ignored
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(map[string]string{"Id": exec.ID})
+}
+
+func (s *DockerServer) startExecContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ if exec, err := s.getExec(id, false); err == nil {
+ s.execMut.Lock()
+ exec.Running = true
+ s.execMut.Unlock()
+ if callback, ok := s.execCallbacks[id]; ok {
+ callback()
+ delete(s.execCallbacks, id)
+ } else if callback, ok := s.execCallbacks["*"]; ok {
+ callback()
+ delete(s.execCallbacks, "*")
+ }
+ s.execMut.Lock()
+ exec.Running = false
+ s.execMut.Unlock()
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *DockerServer) resizeExecContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ if _, err := s.getExec(id, false); err == nil {
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *DockerServer) inspectExecContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ if exec, err := s.getExec(id, true); err == nil {
+ // set the Content-Type header before writing the status, or it is ignored
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(exec)
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *DockerServer) getExec(id string, copy bool) (*docker.ExecInspect, error) {
+ s.execMut.RLock()
+ defer s.execMut.RUnlock()
+ for _, exec := range s.execs {
+ if exec.ID == id {
+ if copy {
+ cp := *exec
+ exec = &cp
+ }
+ return exec, nil
+ }
+ }
+ return nil, errors.New("exec not found")
+}
+
+func (s *DockerServer) findNetwork(idOrName string) (*docker.Network, int, error) {
+ s.netMut.RLock()
+ defer s.netMut.RUnlock()
+ for i, network := range s.networks {
+ if network.ID == idOrName || network.Name == idOrName {
+ return network, i, nil
+ }
+ }
+ return nil, -1, errors.New("No such network")
+}
+
+func (s *DockerServer) listNetworks(w http.ResponseWriter, r *http.Request) {
+ s.netMut.RLock()
+ result := make([]docker.Network, 0, len(s.networks))
+ for _, network := range s.networks {
+ result = append(result, *network)
+ }
+ s.netMut.RUnlock()
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) networkInfo(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ network, _, err := s.findNetwork(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(network)
+}
+
+// isValidName validates the name of configuration objects supported by libnetwork
+func isValidName(name string) bool {
+ if name == "" || strings.Contains(name, ".") {
+ return false
+ }
+ return true
+}
+
+func (s *DockerServer) createNetwork(w http.ResponseWriter, r *http.Request) {
+ var config *docker.CreateNetworkOptions
+ defer r.Body.Close()
+ err := json.NewDecoder(r.Body).Decode(&config)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ if !isValidName(config.Name) {
+ http.Error(w, "Invalid network name", http.StatusBadRequest)
+ return
+ }
+ if n, _, _ := s.findNetwork(config.Name); n != nil {
+ http.Error(w, "network already exists", http.StatusForbidden)
+ return
+ }
+
+ generatedID := s.generateID()
+ network := docker.Network{
+ Name: config.Name,
+ ID: generatedID,
+ Type: config.NetworkType,
+ }
+ s.netMut.Lock()
+ s.networks = append(s.networks, &network)
+ s.netMut.Unlock()
+ w.WriteHeader(http.StatusCreated)
+ var c = struct{ ID string }{ID: network.ID}
+ json.NewEncoder(w).Encode(c)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go
new file mode 100644
index 0000000..55f4317
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go
@@ -0,0 +1,96 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The content is borrowed from Docker's own source code to provide a simple
+// TLS-based dialer.
+
+package docker
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "time"
+)
+
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if cwc, ok := c.rawConn.(interface {
+ CloseWrite() error
+ }); ok {
+ return cwc.CloseWrite()
+ }
+ return nil
+}
+
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ rawConn, err := dialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ c := *config
+ c.ServerName = hostname
+ config = &c
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where we differ from the standard crypto/tls package: we return
+ // a wrapper which holds both the TLS and raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
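+
+// A minimal sketch of calling the dialer from within this package
+// (illustrative only; the address and config values are assumptions):
+//
+// conn, err := tlsDialWithDialer(
+// &net.Dialer{Timeout: 32 * time.Second},
+// "tcp", "boot2docker:2376",
+// &tls.Config{InsecureSkipVerify: true},
+// )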
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go
new file mode 100644
index 0000000..a989a6e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go
@@ -0,0 +1,127 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+)
+
+var (
+ // ErrNoSuchVolume is the error returned when the volume does not exist.
+ ErrNoSuchVolume = errors.New("no such volume")
+
+ // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use.
+ ErrVolumeInUse = errors.New("volume in use and cannot be removed")
+)
+
+// Volume represents a volume.
+//
+// See https://goo.gl/FZA4BK for more details.
+type Volume struct {
+ Name string `json:"Name" yaml:"Name"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"`
+}
+
+// ListVolumesOptions specify parameters to the ListVolumes function.
+//
+// See https://goo.gl/FZA4BK for more details.
+type ListVolumesOptions struct {
+ Filters map[string][]string
+}
+
+// ListVolumes returns a list of available volumes in the server.
+//
+// See https://goo.gl/FZA4BK for more details.
+func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) {
+ resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ m := make(map[string]interface{})
+ if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
+ return nil, err
+ }
+ var volumes []Volume
+ volumesJSON, ok := m["Volumes"]
+ if !ok {
+ return volumes, nil
+ }
+ data, err := json.Marshal(volumesJSON)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(data, &volumes); err != nil {
+ return nil, err
+ }
+ return volumes, nil
+}
+
+// CreateVolumeOptions specify parameters to the CreateVolume function.
+//
+// See https://goo.gl/pBUbZ9 for more details.
+type CreateVolumeOptions struct {
+ Name string
+ Driver string
+ DriverOpts map[string]string
+}
+
+// CreateVolume creates a volume on the server.
+//
+// See https://goo.gl/pBUbZ9 for more details.
+func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) {
+ resp, err := c.do("POST", "/volumes", doOptions{data: opts})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var volume Volume
+ if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+ return nil, err
+ }
+ return &volume, nil
+}
+
+// InspectVolume returns a volume by its name.
+//
+// See https://goo.gl/0g9A6i for more details.
+func (c *Client) InspectVolume(name string) (*Volume, error) {
+ resp, err := c.do("GET", "/volumes/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchVolume
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var volume Volume
+ if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+ return nil, err
+ }
+ return &volume, nil
+}
+
+// RemoveVolume removes a volume by its name.
+//
+// See https://goo.gl/79GNQz for more details.
+func (c *Client) RemoveVolume(name string) error {
+ resp, err := c.do("DELETE", "/volumes/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok {
+ if e.Status == http.StatusNotFound {
+ return ErrNoSuchVolume
+ }
+ if e.Status == http.StatusConflict {
+ return ErrVolumeInUse
+ }
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
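+
+// A sketch of the volume lifecycle against these helpers (illustrative
+// only; the volume name is an assumption):
+//
+// volume, err := client.CreateVolume(docker.CreateVolumeOptions{Name: "data"})
+// // handle error
+// volumes, err := client.ListVolumes(docker.ListVolumesOptions{})
+// // handle error
+// if err := client.RemoveVolume("data"); err == docker.ErrVolumeInUse {
+// // a container still references the volume
+// }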
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go
new file mode 100644
index 0000000..ce78eff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package internal
+
+import (
+ "strings"
+)
+
+const (
+ WatchState = 1 << iota
+ MultiState
+ SubscribeState
+ MonitorState
+)
+
+type CommandInfo struct {
+ Set, Clear int
+}
+
+var commandInfos = map[string]CommandInfo{
+ "WATCH": {Set: WatchState},
+ "UNWATCH": {Clear: WatchState},
+ "MULTI": {Set: MultiState},
+ "EXEC": {Clear: WatchState | MultiState},
+ "DISCARD": {Clear: WatchState | MultiState},
+ "PSUBSCRIBE": {Set: SubscribeState},
+ "SUBSCRIBE": {Set: SubscribeState},
+ "MONITOR": {Set: MonitorState},
+}
+
+func LookupCommandInfo(commandName string) CommandInfo {
+ return commandInfos[strings.ToUpper(commandName)]
+}
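+
+// A minimal sketch of how connection code can consult this table
+// (illustrative only):
+//
+// info := LookupCommandInfo("subscribe") // lookup is case-insensitive
+// if info.Set&SubscribeState != 0 {
+// // the connection is entering subscribe state
+// }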
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go
new file mode 100644
index 0000000..5f955c4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go
@@ -0,0 +1,65 @@
+// Copyright 2014 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redistest contains utilities for writing Redigo tests.
+package redistest
+
+import (
+ "errors"
+ "time"
+
+ "github.com/garyburd/redigo/redis"
+)
+
+type testConn struct {
+ redis.Conn
+}
+
+func (t testConn) Close() error {
+ _, err := t.Conn.Do("SELECT", "9")
+ if err != nil {
+ return err
+ }
+ _, err = t.Conn.Do("FLUSHDB")
+ if err != nil {
+ return err
+ }
+ return t.Conn.Close()
+}
+
+// Dial dials the local Redis server and selects database 9. To prevent
+// stomping on real data, Dial fails if database 9 contains data. The
+// returned connection flushes database 9 on close.
+func Dial() (redis.Conn, error) {
+ c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = c.Do("SELECT", "9")
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := redis.Int(c.Do("DBSIZE"))
+ if err != nil {
+ return nil, err
+ }
+
+ if n != 0 {
+ return nil, errors.New("database #9 is not empty, test can not continue")
+ }
+
+ return testConn{c}, nil
+}
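+
+// Example (editorial sketch): a test acquires a throwaway connection, and
+// closing it flushes database 9 (assumes a *testing.T named t):
+//
+//    c, err := redistest.Dial()
+//    if err != nil {
+//        t.Fatalf("redis: %v", err)
+//    }
+//    defer c.Close()
+//    c.Do("SET", "key", "value") // written to database 9 only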
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go
new file mode 100644
index 0000000..ac0e971
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go
@@ -0,0 +1,455 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// conn is the low-level implementation of Conn
+type conn struct {
+
+ // Shared
+ mu sync.Mutex
+ pending int
+ err error
+ conn net.Conn
+
+ // Read
+ readTimeout time.Duration
+ br *bufio.Reader
+
+ // Write
+ writeTimeout time.Duration
+ bw *bufio.Writer
+
+ // Scratch space for formatting argument length.
+ // '*' or '$', length, "\r\n"
+ lenScratch [32]byte
+
+ // Scratch space for formatting integers and floats.
+ numScratch [40]byte
+}
+
+// Dial connects to the Redis server at the given network and address.
+func Dial(network, address string) (Conn, error) {
+ dialer := xDialer{}
+ return dialer.Dial(network, address)
+}
+
+// DialTimeout acts like Dial but takes timeouts for establishing the
+// connection to the server, writing a command and reading a reply.
+func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
+ netDialer := net.Dialer{Timeout: connectTimeout}
+ dialer := xDialer{
+ NetDial: netDialer.Dial,
+ ReadTimeout: readTimeout,
+ WriteTimeout: writeTimeout,
+ }
+ return dialer.Dial(network, address)
+}
+
+// An xDialer specifies options for connecting to a Redis server.
+type xDialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, then net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // ReadTimeout specifies the timeout for reading a single command
+ // reply. If ReadTimeout is zero, then no timeout is used.
+ ReadTimeout time.Duration
+
+ // WriteTimeout specifies the timeout for writing a single command. If
+ // WriteTimeout is zero, then no timeout is used.
+ WriteTimeout time.Duration
+}
+
+// Dial connects to the Redis server at address on the named network.
+func (d *xDialer) Dial(network, address string) (Conn, error) {
+ dial := d.NetDial
+ if dial == nil {
+ dial = net.Dial
+ }
+ netConn, err := dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+ return &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: d.ReadTimeout,
+ writeTimeout: d.WriteTimeout,
+ }, nil
+}
+
+// NewConn returns a new Redigo connection for the given net connection.
+func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
+ return &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: readTimeout,
+ writeTimeout: writeTimeout,
+ }
+}
+
+func (c *conn) Close() error {
+ c.mu.Lock()
+ err := c.err
+ if c.err == nil {
+ c.err = errors.New("redigo: closed")
+ err = c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) fatal(err error) error {
+ c.mu.Lock()
+ if c.err == nil {
+ c.err = err
+ // Close connection to force errors on subsequent calls and to unblock
+ // other reader or writer.
+ c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) Err() error {
+ c.mu.Lock()
+ err := c.err
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) writeLen(prefix byte, n int) error {
+ c.lenScratch[len(c.lenScratch)-1] = '\n'
+ c.lenScratch[len(c.lenScratch)-2] = '\r'
+ i := len(c.lenScratch) - 3
+ for {
+ c.lenScratch[i] = byte('0' + n%10)
+ i -= 1
+ n = n / 10
+ if n == 0 {
+ break
+ }
+ }
+ c.lenScratch[i] = prefix
+ _, err := c.bw.Write(c.lenScratch[i:])
+ return err
+}
+
+func (c *conn) writeString(s string) error {
+ c.writeLen('$', len(s))
+ c.bw.WriteString(s)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeBytes(p []byte) error {
+ c.writeLen('$', len(p))
+ c.bw.Write(p)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeInt64(n int64) error {
+ return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
+}
+
+func (c *conn) writeFloat64(n float64) error {
+ return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
+}
+
+func (c *conn) writeCommand(cmd string, args []interface{}) (err error) {
+ c.writeLen('*', 1+len(args))
+ err = c.writeString(cmd)
+ for _, arg := range args {
+ if err != nil {
+ break
+ }
+ switch arg := arg.(type) {
+ case string:
+ err = c.writeString(arg)
+ case []byte:
+ err = c.writeBytes(arg)
+ case int:
+ err = c.writeInt64(int64(arg))
+ case int64:
+ err = c.writeInt64(arg)
+ case float64:
+ err = c.writeFloat64(arg)
+ case bool:
+ if arg {
+ err = c.writeString("1")
+ } else {
+ err = c.writeString("0")
+ }
+ case nil:
+ err = c.writeString("")
+ default:
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, arg)
+ err = c.writeBytes(buf.Bytes())
+ }
+ }
+ return err
+}
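+
+// For reference (editorial note): writeCommand emits the RESP unified
+// protocol. For example, Send("SET", "foo", "bar") serializes as:
+//
+//    *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n
+//
+// i.e. an array header carrying the argument count, followed by one
+// length-prefixed bulk string per argument.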
+
+type protocolError string
+
+func (pe protocolError) Error() string {
+ return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
+}
+
+func (c *conn) readLine() ([]byte, error) {
+ p, err := c.br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ return nil, protocolError("long response line")
+ }
+ if err != nil {
+ return nil, err
+ }
+ i := len(p) - 2
+ if i < 0 || p[i] != '\r' {
+ return nil, protocolError("bad response line terminator")
+ }
+ return p[:i], nil
+}
+
+// parseLen parses bulk string and array lengths.
+func parseLen(p []byte) (int, error) {
+ if len(p) == 0 {
+ return -1, protocolError("malformed length")
+ }
+
+ if p[0] == '-' && len(p) == 2 && p[1] == '1' {
+ // Handle the $-1 and *-1 null replies.
+ return -1, nil
+ }
+
+ var n int
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+ return -1, protocolError("illegal bytes in length")
+ }
+ n += int(b - '0')
+ }
+
+ return n, nil
+}
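+
+// For reference (editorial note): parseLen receives the line with its type
+// byte already stripped, so for a reply line "$5" it sees "5" and returns 5,
+// and for the null reply "$-1" it sees "-1" and returns -1 with a nil error.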
+
+// parseInt parses an integer reply.
+func parseInt(p []byte) (interface{}, error) {
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+
+ var negate bool
+ if p[0] == '-' {
+ negate = true
+ p = p[1:]
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+ }
+
+ var n int64
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+ return 0, protocolError("illegal bytes in integer")
+ }
+ n += int64(b - '0')
+ }
+
+ if negate {
+ n = -n
+ }
+ return n, nil
+}
+
+var (
+ okReply interface{} = "OK"
+ pongReply interface{} = "PONG"
+)
+
+func (c *conn) readReply() (interface{}, error) {
+ line, err := c.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if len(line) == 0 {
+ return nil, protocolError("short response line")
+ }
+ switch line[0] {
+ case '+':
+ switch {
+ case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
+ // Avoid allocation for frequent "+OK" response.
+ return okReply, nil
+ case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
+ // Avoid allocation in PING command benchmarks :)
+ return pongReply, nil
+ default:
+ return string(line[1:]), nil
+ }
+ case '-':
+ return Error(string(line[1:])), nil
+ case ':':
+ return parseInt(line[1:])
+ case '$':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ p := make([]byte, n)
+ _, err = io.ReadFull(c.br, p)
+ if err != nil {
+ return nil, err
+ }
+ if line, err := c.readLine(); err != nil {
+ return nil, err
+ } else if len(line) != 0 {
+ return nil, protocolError("bad bulk string format")
+ }
+ return p, nil
+ case '*':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ r := make([]interface{}, n)
+ for i := range r {
+ r[i], err = c.readReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return r, nil
+ }
+ return nil, protocolError("unexpected response line")
+}
+
+func (c *conn) Send(cmd string, args ...interface{}) error {
+ c.mu.Lock()
+ c.pending += 1
+ c.mu.Unlock()
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.writeCommand(cmd, args); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Flush() error {
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.bw.Flush(); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Receive() (reply interface{}, err error) {
+ if c.readTimeout != 0 {
+ c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
+ }
+ if reply, err = c.readReply(); err != nil {
+ return nil, c.fatal(err)
+ }
+ // When using pub/sub, the number of receives can be greater than the
+ // number of sends. To enable normal use of the connection after
+ // unsubscribing from all channels, we do not decrement pending to a
+ // negative value.
+ //
+ // The pending field is decremented after the reply is read to handle the
+ // case where Receive is called before Send.
+ c.mu.Lock()
+ if c.pending > 0 {
+ c.pending -= 1
+ }
+ c.mu.Unlock()
+ if err, ok := reply.(Error); ok {
+ return nil, err
+ }
+ return
+}
+
+func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
+ c.mu.Lock()
+ pending := c.pending
+ c.pending = 0
+ c.mu.Unlock()
+
+ if cmd == "" && pending == 0 {
+ return nil, nil
+ }
+
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+
+ if cmd != "" {
+ c.writeCommand(cmd, args)
+ }
+
+ if err := c.bw.Flush(); err != nil {
+ return nil, c.fatal(err)
+ }
+
+ if c.readTimeout != 0 {
+ c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
+ }
+
+ if cmd == "" {
+ reply := make([]interface{}, pending)
+ for i := range reply {
+ r, e := c.readReply()
+ if e != nil {
+ return nil, c.fatal(e)
+ }
+ reply[i] = r
+ }
+ return reply, nil
+ }
+
+ var err error
+ var reply interface{}
+ for i := 0; i <= pending; i++ {
+ var e error
+ if reply, e = c.readReply(); e != nil {
+ return nil, c.fatal(e)
+ }
+ if e, ok := reply.(Error); ok && err == nil {
+ err = e
+ }
+ }
+ return reply, err
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go
new file mode 100644
index 0000000..1ae6f0c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go
@@ -0,0 +1,169 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+// Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+// n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to binary strings for transmission
+// to the server as follows:
+//
+// Go Type Conversion
+// []byte Sent as is
+// string Sent as is
+// int, int64 strconv.FormatInt(v)
+// float64 strconv.FormatFloat(v, 'g', -1, 64)
+// bool true -> "1", false -> "0"
+// nil ""
+// all other types fmt.Print(v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+// Redis type Go type
+// error redis.Error
+// integer int64
+// simple string string
+// bulk string []byte or nil if value not present.
+// array []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+// Send(commandName string, args ...interface{}) error
+// Flush() error
+// Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+// c.Send("SET", "foo", "bar")
+// c.Send("GET", "foo")
+// c.Flush()
+// c.Receive() // reply from SET
+// v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+// c.Send("MULTI")
+// c.Send("INCR", "foo")
+// c.Send("INCR", "bar")
+// r, err := c.Do("EXEC")
+// fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections do not support concurrent calls to the write methods (Send,
+// Flush) or concurrent calls to the read method (Receive). Connections do
+// allow a concurrent reader and writer.
+//
+// Because the Do method combines the functionality of Send, Flush and Receive,
+// the Do method cannot be called concurrently with the other methods.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get and
+// release connections from within a goroutine.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+// c.Send("SUBSCRIBE", "example")
+// c.Flush()
+// for {
+// reply, err := c.Receive()
+// if err != nil {
+// return err
+// }
+// // process pushed message
+// }
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The Receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+// psc := redis.PubSubConn{c}
+// psc.Subscribe("example")
+// for {
+// switch v := psc.Receive().(type) {
+// case redis.Message:
+// fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+// case redis.Subscription:
+// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+// case error:
+// return v
+// }
+// }
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error. If the error is non-nil, then the helper function returns the
+// error. If the error is nil, the function converts the reply to the specified
+// type:
+//
+// exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+// if err != nil {
+// // handle error return from c.Do or type conversion error.
+// }
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+// var value1 int
+// var value2 string
+// reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+// if err != nil {
+// // handle error
+// }
+// if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+// // handle error
+// }
+package redis
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go
new file mode 100644
index 0000000..129b86d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+)
+
+// NewLoggingConn returns a logging wrapper around a connection.
+func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
+ if prefix != "" {
+ prefix = prefix + "."
+ }
+ return &loggingConn{conn, logger, prefix}
+}
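+
+// Example (editorial sketch, assuming an existing Conn named c): wrap a
+// connection so every Do/Send/Receive is logged with a "redis." prefix:
+//
+//    logger := log.New(os.Stderr, "", log.LstdFlags)
+//    c = redis.NewLoggingConn(c, logger, "redis")
+//    c.Do("PING") // logs: redis.Do(PING) -> ("PONG", <nil>)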
+
+type loggingConn struct {
+ Conn
+ logger *log.Logger
+ prefix string
+}
+
+func (c *loggingConn) Close() error {
+ err := c.Conn.Close()
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
+ c.logger.Output(2, buf.String())
+ return err
+}
+
+func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
+ const chop = 32
+ switch v := v.(type) {
+ case []byte:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case string:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case []interface{}:
+ if len(v) == 0 {
+ buf.WriteString("[]")
+ } else {
+ sep := "["
+ fin := "]"
+ if len(v) > chop {
+ v = v[:chop]
+ fin = "...]"
+ }
+ for _, vv := range v {
+ buf.WriteString(sep)
+ c.printValue(buf, vv)
+ sep = ", "
+ }
+ buf.WriteString(fin)
+ }
+ default:
+ fmt.Fprint(buf, v)
+ }
+}
+
+func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
+ if method != "Receive" {
+ buf.WriteString(commandName)
+ for _, arg := range args {
+ buf.WriteString(", ")
+ c.printValue(&buf, arg)
+ }
+ }
+ buf.WriteString(") -> (")
+ if method != "Send" {
+ c.printValue(&buf, reply)
+ buf.WriteString(", ")
+ }
+ fmt.Fprintf(&buf, "%v)", err)
+ c.logger.Output(3, buf.String())
+}
+
+func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
+ reply, err := c.Conn.Do(commandName, args...)
+ c.print("Do", commandName, args, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) Send(commandName string, args ...interface{}) error {
+ err := c.Conn.Send(commandName, args...)
+ c.print("Send", commandName, args, nil, err)
+ return err
+}
+
+func (c *loggingConn) Receive() (interface{}, error) {
+ reply, err := c.Conn.Receive()
+ c.print("Receive", "", nil, reply, err)
+ return reply, err
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go
new file mode 100644
index 0000000..9daf2e3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go
@@ -0,0 +1,389 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "container/list"
+ "crypto/rand"
+ "crypto/sha1"
+ "errors"
+ "io"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/garyburd/redigo/internal"
+)
+
+var nowFunc = time.Now // for testing
+
+// ErrPoolExhausted is returned from a pool connection method (Do, Send,
+// Receive, Flush, Err) when the maximum number of database connections in the
+// pool has been reached.
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
+
+var (
+ errPoolClosed = errors.New("redigo: connection pool closed")
+ errConnClosed = errors.New("redigo: connection closed")
+)
+
+// Pool maintains a pool of connections. The application calls the Get method
+// to get a connection from the pool and the connection's Close method to
+// return the connection's resources to the pool.
+//
+// The following example shows how to use a pool in a web application. The
+// application creates a pool at application startup and makes it available to
+// request handlers using a global variable.
+//
+// func newPool(server, password string) *redis.Pool {
+// return &redis.Pool{
+// MaxIdle: 3,
+// IdleTimeout: 240 * time.Second,
+// Dial: func() (redis.Conn, error) {
+// c, err := redis.Dial("tcp", server)
+// if err != nil {
+// return nil, err
+// }
+// if _, err := c.Do("AUTH", password); err != nil {
+// c.Close()
+// return nil, err
+// }
+// return c, err
+// },
+// TestOnBorrow: func(c redis.Conn, t time.Time) error {
+// _, err := c.Do("PING")
+// return err
+// },
+// }
+// }
+//
+// var (
+// pool *redis.Pool
+// redisServer = flag.String("redisServer", ":6379", "")
+// redisPassword = flag.String("redisPassword", "", "")
+// )
+//
+// func main() {
+// flag.Parse()
+// pool = newPool(*redisServer, *redisPassword)
+// ...
+// }
+//
+// A request handler gets a connection from the pool and closes the connection
+// when the handler is done:
+//
+// func serveHome(w http.ResponseWriter, r *http.Request) {
+// conn := pool.Get()
+// defer conn.Close()
+// ....
+// }
+//
+type Pool struct {
+
+ // Dial is an application supplied function for creating and configuring a
+ // connection.
+ Dial func() (Conn, error)
+
+ // TestOnBorrow is an optional application supplied function for checking
+ // the health of an idle connection before the connection is used again by
+ // the application. Argument t is the time that the connection was returned
+ // to the pool. If the function returns an error, then the connection is
+ // closed.
+ TestOnBorrow func(c Conn, t time.Time) error
+
+ // Maximum number of idle connections in the pool.
+ MaxIdle int
+
+ // Maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ MaxActive int
+
+ // Close connections after remaining idle for this duration. If the value
+ // is zero, then idle connections are not closed. Applications should set
+ // the timeout to a value less than the server's timeout.
+ IdleTimeout time.Duration
+
+ // If Wait is true and the pool is at the MaxActive limit, then Get() waits
+ // for a connection to be returned to the pool before returning.
+ Wait bool
+
+ // mu protects fields defined below.
+ mu sync.Mutex
+ cond *sync.Cond
+ closed bool
+ active int
+
+ // Stack of idleConn with most recently used at the front.
+ idle list.List
+}
+
+type idleConn struct {
+ c Conn
+ t time.Time
+}
+
+// NewPool creates a new pool. This function is deprecated. Applications should
+// initialize the Pool fields directly as shown in the example above.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+ return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer
+// error handling to the first use of the connection. If there is an error
+// getting an underlying connection, then the connection Err, Do, Send, Flush
+// and Receive methods return that error.
+func (p *Pool) Get() Conn {
+ c, err := p.get()
+ if err != nil {
+ return errorConnection{err}
+ }
+ return &pooledConnection{p: p, c: c}
+}
+
+// ActiveCount returns the number of active connections in the pool.
+func (p *Pool) ActiveCount() int {
+ p.mu.Lock()
+ active := p.active
+ p.mu.Unlock()
+ return active
+}
+
+// Close releases the resources used by the pool.
+func (p *Pool) Close() error {
+ p.mu.Lock()
+ idle := p.idle
+ p.idle.Init()
+ p.closed = true
+ p.active -= idle.Len()
+ if p.cond != nil {
+ p.cond.Broadcast()
+ }
+ p.mu.Unlock()
+ for e := idle.Front(); e != nil; e = e.Next() {
+ e.Value.(idleConn).c.Close()
+ }
+ return nil
+}
+
+// release decrements the active count and signals waiters. The caller must
+// hold p.mu during the call.
+func (p *Pool) release() {
+ p.active -= 1
+ if p.cond != nil {
+ p.cond.Signal()
+ }
+}
+
+// get prunes stale connections and returns a connection from the idle list or
+// creates a new connection.
+func (p *Pool) get() (Conn, error) {
+ p.mu.Lock()
+
+ // Prune stale connections.
+
+ if timeout := p.IdleTimeout; timeout > 0 {
+ for i, n := 0, p.idle.Len(); i < n; i++ {
+ e := p.idle.Back()
+ if e == nil {
+ break
+ }
+ ic := e.Value.(idleConn)
+ if ic.t.Add(timeout).After(nowFunc()) {
+ break
+ }
+ p.idle.Remove(e)
+ p.release()
+ p.mu.Unlock()
+ ic.c.Close()
+ p.mu.Lock()
+ }
+ }
+
+ for {
+
+ // Get idle connection.
+
+ for i, n := 0, p.idle.Len(); i < n; i++ {
+ e := p.idle.Front()
+ if e == nil {
+ break
+ }
+ ic := e.Value.(idleConn)
+ p.idle.Remove(e)
+ test := p.TestOnBorrow
+ p.mu.Unlock()
+ if test == nil || test(ic.c, ic.t) == nil {
+ return ic.c, nil
+ }
+ ic.c.Close()
+ p.mu.Lock()
+ p.release()
+ }
+
+ // Check for pool closed before dialing a new connection.
+
+ if p.closed {
+ p.mu.Unlock()
+ return nil, errors.New("redigo: get on closed pool")
+ }
+
+ // Dial new connection if under limit.
+
+ if p.MaxActive == 0 || p.active < p.MaxActive {
+ dial := p.Dial
+ p.active += 1
+ p.mu.Unlock()
+ c, err := dial()
+ if err != nil {
+ p.mu.Lock()
+ p.release()
+ p.mu.Unlock()
+ c = nil
+ }
+ return c, err
+ }
+
+ if !p.Wait {
+ p.mu.Unlock()
+ return nil, ErrPoolExhausted
+ }
+
+ if p.cond == nil {
+ p.cond = sync.NewCond(&p.mu)
+ }
+ p.cond.Wait()
+ }
+}
+
+func (p *Pool) put(c Conn, forceClose bool) error {
+ err := c.Err()
+ p.mu.Lock()
+ if !p.closed && err == nil && !forceClose {
+ p.idle.PushFront(idleConn{t: nowFunc(), c: c})
+ if p.idle.Len() > p.MaxIdle {
+ c = p.idle.Remove(p.idle.Back()).(idleConn).c
+ } else {
+ c = nil
+ }
+ }
+
+ if c == nil {
+ if p.cond != nil {
+ p.cond.Signal()
+ }
+ p.mu.Unlock()
+ return nil
+ }
+
+ p.release()
+ p.mu.Unlock()
+ return c.Close()
+}
+
+type pooledConnection struct {
+ p *Pool
+ c Conn
+ state int
+}
+
+var (
+ sentinel []byte
+ sentinelOnce sync.Once
+)
+
+func initSentinel() {
+ p := make([]byte, 64)
+ if _, err := rand.Read(p); err == nil {
+ sentinel = p
+ } else {
+ h := sha1.New()
+ io.WriteString(h, "Oops, rand failed. Use time instead.")
+ io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
+ sentinel = h.Sum(nil)
+ }
+}
+
+func (pc *pooledConnection) Close() error {
+ c := pc.c
+ if _, ok := c.(errorConnection); ok {
+ return nil
+ }
+ pc.c = errorConnection{errConnClosed}
+
+ if pc.state&internal.MultiState != 0 {
+ c.Send("DISCARD")
+ pc.state &^= (internal.MultiState | internal.WatchState)
+ } else if pc.state&internal.WatchState != 0 {
+ c.Send("UNWATCH")
+ pc.state &^= internal.WatchState
+ }
+ if pc.state&internal.SubscribeState != 0 {
+ c.Send("UNSUBSCRIBE")
+ c.Send("PUNSUBSCRIBE")
+ // To detect the end of the message stream, ask the server to echo
+ // a sentinel value and read until we see that value.
+ sentinelOnce.Do(initSentinel)
+ c.Send("ECHO", sentinel)
+ c.Flush()
+ for {
+ p, err := c.Receive()
+ if err != nil {
+ break
+ }
+ if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
+ pc.state &^= internal.SubscribeState
+ break
+ }
+ }
+ }
+ c.Do("")
+ pc.p.put(c, pc.state != 0)
+ return nil
+}
+
+func (pc *pooledConnection) Err() error {
+ return pc.c.Err()
+}
+
+func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+ ci := internal.LookupCommandInfo(commandName)
+ pc.state = (pc.state | ci.Set) &^ ci.Clear
+ return pc.c.Do(commandName, args...)
+}
+
+func (pc *pooledConnection) Send(commandName string, args ...interface{}) error {
+ ci := internal.LookupCommandInfo(commandName)
+ pc.state = (pc.state | ci.Set) &^ ci.Clear
+ return pc.c.Send(commandName, args...)
+}
+
+func (pc *pooledConnection) Flush() error {
+ return pc.c.Flush()
+}
+
+func (pc *pooledConnection) Receive() (reply interface{}, err error) {
+ return pc.c.Receive()
+}
+
+type errorConnection struct{ err error }
+
+func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
+func (ec errorConnection) Send(string, ...interface{}) error { return ec.err }
+func (ec errorConnection) Err() error { return ec.err }
+func (ec errorConnection) Close() error { return ec.err }
+func (ec errorConnection) Flush() error { return ec.err }
+func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err }
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go
new file mode 100644
index 0000000..f079042
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go
@@ -0,0 +1,129 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+)
+
+// Subscription represents a subscribe or unsubscribe notification.
+type Subscription struct {
+
+ // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
+ Kind string
+
+ // The channel that was changed.
+ Channel string
+
+ // The current number of subscriptions for the connection.
+ Count int
+}
+
+// Message represents a message notification.
+type Message struct {
+
+ // The originating channel.
+ Channel string
+
+ // The message data.
+ Data []byte
+}
+
+// PMessage represents a pmessage notification.
+type PMessage struct {
+
+ // The matched pattern.
+ Pattern string
+
+ // The originating channel.
+ Channel string
+
+ // The message data.
+ Data []byte
+}
+
+// PubSubConn wraps a Conn with convenience methods for subscribers.
+type PubSubConn struct {
+ Conn Conn
+}
+
+// Close closes the connection.
+func (c PubSubConn) Close() error {
+ return c.Conn.Close()
+}
+
+// Subscribe subscribes the connection to the specified channels.
+func (c PubSubConn) Subscribe(channel ...interface{}) error {
+ c.Conn.Send("SUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PSubscribe subscribes the connection to the given patterns.
+func (c PubSubConn) PSubscribe(channel ...interface{}) error {
+ c.Conn.Send("PSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Unsubscribe unsubscribes the connection from the given channels, or from all
+// of them if none is given.
+func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
+ c.Conn.Send("UNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PUnsubscribe unsubscribes the connection from the given patterns, or from all
+// of them if none is given.
+func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
+ c.Conn.Send("PUNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Receive returns a pushed message as a Subscription, Message, PMessage or
+// error. The return value is intended to be used directly in a type switch as
+// illustrated in the PubSubConn example.
+func (c PubSubConn) Receive() interface{} {
+ reply, err := Values(c.Conn.Receive())
+ if err != nil {
+ return err
+ }
+
+ var kind string
+ reply, err = Scan(reply, &kind)
+ if err != nil {
+ return err
+ }
+
+ switch kind {
+ case "message":
+ var m Message
+ if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
+ return err
+ }
+ return m
+ case "pmessage":
+ var pm PMessage
+ if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
+ return err
+ }
+ return pm
+ case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
+ s := Subscription{Kind: kind}
+ if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
+ return err
+ }
+ return s
+ }
+ return errors.New("redigo: unknown pubsub notification")
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go
new file mode 100644
index 0000000..c90a48e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+ // Close closes the connection.
+ Close() error
+
+ // Err returns a non-nil value if the connection is broken. The returned
+ // value is either the first non-nil value returned from the underlying
+ // network connection or a protocol parsing error. Applications should
+ // close broken connections.
+ Err() error
+
+ // Do sends a command to the server and returns the received reply.
+ Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+ // Send writes the command to the client's output buffer.
+ Send(commandName string, args ...interface{}) error
+
+ // Flush flushes the output buffer to the Redis server.
+ Flush() error
+
+ // Receive receives a single reply from the Redis server.
+ Receive() (reply interface{}, err error)
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go
new file mode 100644
index 0000000..5648f93
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go
@@ -0,0 +1,312 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+// ErrNil indicates that a reply value is nil.
+var ErrNil = errors.New("redigo: nil returned")
+
+// Int is a helper that converts a command reply to an integer. If err is not
+// equal to nil, then Int returns 0, err. Otherwise, Int converts the
+// reply to an int as follows:
+//
+// Reply type Result
+// integer int(reply), nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int(reply interface{}, err error) (int, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ x := int(reply)
+ if int64(x) != reply {
+ return 0, strconv.ErrRange
+ }
+ return x, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 0)
+ return int(n), err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to a 64-bit integer. If err
+// is not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the
+// reply to an int64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int64(reply interface{}, err error) (int64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to a 64-bit unsigned
+// integer. If err is not equal to nil, then Uint64 returns 0, err. Otherwise,
+// Uint64 converts the reply to a uint64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ if reply < 0 {
+ return 0, errNegativeInt
+ }
+ return uint64(reply), nil
+ case []byte:
+ n, err := strconv.ParseUint(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to a 64-bit float. If err
+// is not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
+// the reply to a float64 as follows:
+//
+// Reply type Result
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Float64(reply interface{}, err error) (float64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ n, err := strconv.ParseFloat(string(reply), 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err. Otherwise String converts the
+// reply to a string as follows:
+//
+// Reply type Result
+// bulk string string(reply), nil
+// simple string reply, nil
+// nil "", ErrNil
+// other "", error
+func String(reply interface{}, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return string(reply), nil
+ case string:
+ return reply, nil
+ case nil:
+ return "", ErrNil
+ case Error:
+ return "", reply
+ }
+ return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
+}
+
+// Bytes is a helper that converts a command reply to a slice of bytes. If err
+// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+// Reply type Result
+// bulk string reply, nil
+// simple string []byte(reply), nil
+// nil nil, ErrNil
+// other nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return reply, nil
+ case string:
+ return []byte(reply), nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean. If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to boolean as follows:
+//
+// Reply type Result
+// integer value != 0, nil
+// bulk string strconv.ParseBool(reply)
+// nil false, ErrNil
+// other false, error
+func Bool(reply interface{}, err error) (bool, error) {
+ if err != nil {
+ return false, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply != 0, nil
+ case []byte:
+ return strconv.ParseBool(string(reply))
+ case nil:
+ return false, ErrNil
+ case Error:
+ return false, reply
+ }
+ return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is deprecated. Use Values.
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
+
+// Values is a helper that converts an array command reply to a []interface{}.
+// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
+// converts the reply as follows:
+//
+// Reply type Result
+// array reply, nil
+// nil nil, ErrNil
+// other nil, error
+func Values(reply interface{}, err error) ([]interface{}, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ return reply, nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
+}
+
+// Strings is a helper that converts an array command reply to a []string. If
+// err is not equal to nil, then Strings returns nil, err. Nil array items are
+// converted to "" in the output slice. Strings returns an error if an array
+// item is not a bulk string or nil.
+func Strings(reply interface{}, err error) ([]string, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ result := make([]string, len(reply))
+ for i := range reply {
+ if reply[i] == nil {
+ continue
+ }
+ p, ok := reply[i].([]byte)
+ if !ok {
+ return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
+ }
+ result[i] = string(p)
+ }
+ return result, nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
+}
+
+// Ints is a helper that converts an array command reply to a []int. If
+// err is not equal to nil, then Ints returns nil, err.
+func Ints(reply interface{}, err error) ([]int, error) {
+ var ints []int
+ if reply == nil {
+ return ints, ErrNil
+ }
+ values, err := Values(reply, err)
+ if err != nil {
+ return ints, err
+ }
+ if err := ScanSlice(values, &ints); err != nil {
+ return ints, err
+ }
+ return ints, nil
+}
+
+// StringMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
+// Requires an even number of values in result.
+func StringMap(result interface{}, err error) (map[string]string, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: StringMap expects even number of values result")
+ }
+ m := make(map[string]string, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, okKey := values[i].([]byte)
+ value, okValue := values[i+1].([]byte)
+ if !okKey || !okValue {
+ return nil, errors.New("redigo: ScanMap key not a bulk string value")
+ }
+ m[string(key)] = string(value)
+ }
+ return m, nil
+}
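+
+// Example (editorial sketch, assuming a Conn named c): an HGETALL reply
+// pairs up naturally with StringMap:
+//
+//    m, err := redis.StringMap(c.Do("HGETALL", "user:1"))
+//    if err != nil {
+//        // handle error
+//    }
+//    name := m["name"]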
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go
new file mode 100644
index 0000000..8c9cfa1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go
@@ -0,0 +1,513 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+func ensureLen(d reflect.Value, n int) {
+ if n > d.Cap() {
+ d.Set(reflect.MakeSlice(d.Type(), n, n))
+ } else {
+ d.SetLen(n)
+ }
+}
+
+func cannotConvert(d reflect.Value, s interface{}) error {
+ return fmt.Errorf("redigo: Scan cannot convert from %s to %s",
+ reflect.TypeOf(s), d.Type())
+}
+
+func convertAssignBytes(d reflect.Value, s []byte) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Float32, reflect.Float64:
+ var x float64
+ x, err = strconv.ParseFloat(string(s), d.Type().Bits())
+ d.SetFloat(x)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ var x int64
+ x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
+ d.SetInt(x)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ var x uint64
+ x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
+ d.SetUint(x)
+ case reflect.Bool:
+ var x bool
+ x, err = strconv.ParseBool(string(s))
+ d.SetBool(x)
+ case reflect.String:
+ d.SetString(string(s))
+ case reflect.Slice:
+ if d.Type().Elem().Kind() != reflect.Uint8 {
+ err = cannotConvert(d, s)
+ } else {
+ d.SetBytes(s)
+ }
+ default:
+ err = cannotConvert(d, s)
+ }
+ return
+}
+
+func convertAssignInt(d reflect.Value, s int64) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ d.SetInt(s)
+ if d.Int() != s {
+ err = strconv.ErrRange
+ d.SetInt(0)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if s < 0 {
+ err = strconv.ErrRange
+ } else {
+ x := uint64(s)
+ d.SetUint(x)
+ if d.Uint() != x {
+ err = strconv.ErrRange
+ d.SetUint(0)
+ }
+ }
+ case reflect.Bool:
+ d.SetBool(s != 0)
+ default:
+ err = cannotConvert(d, s)
+ }
+ return
+}
+
+func convertAssignValue(d reflect.Value, s interface{}) (err error) {
+ switch s := s.(type) {
+ case []byte:
+ err = convertAssignBytes(d, s)
+ case int64:
+ err = convertAssignInt(d, s)
+ default:
+ err = cannotConvert(d, s)
+ }
+ return err
+}
+
+func convertAssignValues(d reflect.Value, s []interface{}) error {
+ if d.Type().Kind() != reflect.Slice {
+ return cannotConvert(d, s)
+ }
+ ensureLen(d, len(s))
+ for i := 0; i < len(s); i++ {
+ if err := convertAssignValue(d.Index(i), s[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func convertAssign(d interface{}, s interface{}) (err error) {
+ // Handle the most common destination types using type switches and
+ // fall back to reflection for all other types.
+ switch s := s.(type) {
+ case nil:
+ // ignore
+ case []byte:
+ switch d := d.(type) {
+ case *string:
+ *d = string(s)
+ case *int:
+ *d, err = strconv.Atoi(string(s))
+ case *bool:
+ *d, err = strconv.ParseBool(string(s))
+ case *[]byte:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignBytes(d.Elem(), s)
+ }
+ }
+ case int64:
+ switch d := d.(type) {
+ case *int:
+ x := int(s)
+ if int64(x) != s {
+ err = strconv.ErrRange
+ x = 0
+ }
+ *d = x
+ case *bool:
+ *d = s != 0
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignInt(d.Elem(), s)
+ }
+ }
+ case []interface{}:
+ switch d := d.(type) {
+ case *[]interface{}:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignValues(d.Elem(), s)
+ }
+ }
+ case Error:
+ err = s
+ default:
+ err = cannotConvert(reflect.ValueOf(d), s)
+ }
+ return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// The values pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or slices of these types. Scan uses the standard strconv
+// package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+ if len(src) < len(dest) {
+ return nil, errors.New("redigo: Scan array short")
+ }
+ var err error
+ for i, d := range dest {
+ err = convertAssign(d, src[i])
+ if err != nil {
+ break
+ }
+ }
+ return src[len(dest):], err
+}
+
+type fieldSpec struct {
+ name string
+ index []int
+ //omitEmpty bool
+}
+
+type structSpec struct {
+ m map[string]*fieldSpec
+ l []*fieldSpec
+}
+
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+ return ss.m[string(name)]
+}
+
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ switch {
+ case f.PkgPath != "":
+ // Ignore unexported fields.
+ case f.Anonymous:
+ // TODO: Handle pointers. Requires change to decoder and
+ // protection against infinite recursion.
+ if f.Type.Kind() == reflect.Struct {
+ compileStructSpec(f.Type, depth, append(index, i), ss)
+ }
+ default:
+ fs := &fieldSpec{name: f.Name}
+ tag := f.Tag.Get("redis")
+ p := strings.Split(tag, ",")
+ if len(p) > 0 {
+ if p[0] == "-" {
+ continue
+ }
+ if len(p[0]) > 0 {
+ fs.name = p[0]
+ }
+ for _, s := range p[1:] {
+ switch s {
+ //case "omitempty":
+ // fs.omitempty = true
+ default:
+ panic(errors.New("redigo: unknown field flag " + s + " for type " + t.Name()))
+ }
+ }
+ }
+ d, found := depth[fs.name]
+ if !found {
+ d = 1 << 30
+ }
+ switch {
+ case len(index) == d:
+ // At same depth, remove from result.
+ delete(ss.m, fs.name)
+ j := 0
+ for i := 0; i < len(ss.l); i++ {
+ if fs.name != ss.l[i].name {
+ ss.l[j] = ss.l[i]
+ j += 1
+ }
+ }
+ ss.l = ss.l[:j]
+ case len(index) < d:
+ fs.index = make([]int, len(index)+1)
+ copy(fs.index, index)
+ fs.index[len(index)] = i
+ depth[fs.name] = len(index)
+ ss.m[fs.name] = fs
+ ss.l = append(ss.l, fs)
+ }
+ }
+ }
+}
+
+var (
+ structSpecMutex sync.RWMutex
+ structSpecCache = make(map[reflect.Type]*structSpec)
+ defaultFieldSpec = &fieldSpec{}
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+
+ structSpecMutex.RLock()
+ ss, found := structSpecCache[t]
+ structSpecMutex.RUnlock()
+ if found {
+ return ss
+ }
+
+ structSpecMutex.Lock()
+ defer structSpecMutex.Unlock()
+ ss, found = structSpecCache[t]
+ if found {
+ return ss
+ }
+
+ ss = &structSpec{m: make(map[string]*fieldSpec)}
+ compileStructSpec(t, make(map[string]int), nil, ss)
+ structSpecCache[t] = ss
+ return ss
+}
+
+var errScanStructValue = errors.New("redigo: ScanStruct value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// 'redis' field tag to override the name:
+//
+// Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+ d := reflect.ValueOf(dest)
+ if d.Kind() != reflect.Ptr || d.IsNil() {
+ return errScanStructValue
+ }
+ d = d.Elem()
+ if d.Kind() != reflect.Struct {
+ return errScanStructValue
+ }
+ ss := structSpecForType(d.Type())
+
+ if len(src)%2 != 0 {
+ return errors.New("redigo: ScanStruct expects even number of values in values")
+ }
+
+ for i := 0; i < len(src); i += 2 {
+ s := src[i+1]
+ if s == nil {
+ continue
+ }
+ name, ok := src[i].([]byte)
+ if !ok {
+ return errors.New("redigo: ScanStruct key not a bulk string value")
+ }
+ fs := ss.fieldSpec(name)
+ if fs == nil {
+ continue
+ }
+ if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+ return err
+ }
+ }
+ return nil
+}
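+
+// Example (editorial sketch, assuming a Conn named c): scan an HGETALL reply
+// into a struct using the 'redis' field tags:
+//
+//    var p struct {
+//        Title string `redis:"title"`
+//        Likes int    `redis:"likes"`
+//    }
+//    v, err := redis.Values(c.Do("HGETALL", "page:1"))
+//    if err != nil {
+//        // handle error
+//    }
+//    if err := redis.ScanStruct(v, &p); err != nil {
+//        // handle error
+//    }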
+
+var (
+ errScanSliceValue = errors.New("redigo: ScanSlice dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements of the dest
+// slice must be integer, float, boolean, string, struct or pointer to struct
+// values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
+ d := reflect.ValueOf(dest)
+ if d.Kind() != reflect.Ptr || d.IsNil() {
+ return errScanSliceValue
+ }
+ d = d.Elem()
+ if d.Kind() != reflect.Slice {
+ return errScanSliceValue
+ }
+
+ isPtr := false
+ t := d.Type().Elem()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ isPtr = true
+ t = t.Elem()
+ }
+
+ if t.Kind() != reflect.Struct {
+ ensureLen(d, len(src))
+ for i, s := range src {
+ if s == nil {
+ continue
+ }
+ if err := convertAssignValue(d.Index(i), s); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ ss := structSpecForType(t)
+ fss := ss.l
+ if len(fieldNames) > 0 {
+ fss = make([]*fieldSpec, len(fieldNames))
+ for i, name := range fieldNames {
+ fss[i] = ss.m[name]
+ if fss[i] == nil {
+ return errors.New("redigo: ScanSlice bad field name " + name)
+ }
+ }
+ }
+
+ if len(fss) == 0 {
+ return errors.New("redigo: ScanSlice no struct fields")
+ }
+
+ n := len(src) / len(fss)
+ if n*len(fss) != len(src) {
+ return errors.New("redigo: ScanSlice length not a multiple of struct field count")
+ }
+
+ ensureLen(d, n)
+ for i := 0; i < n; i++ {
+ d := d.Index(i)
+ if isPtr {
+ if d.IsNil() {
+ d.Set(reflect.New(t))
+ }
+ d = d.Elem()
+ }
+ for j, fs := range fss {
+ s := src[i*len(fss)+j]
+ if s == nil {
+ continue
+ }
+ if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Args is a helper for constructing command arguments from structured values.
+type Args []interface{}
+
+// Add returns the result of appending value to args.
+func (args Args) Add(value ...interface{}) Args {
+ return append(args, value...)
+}
+
+// AddFlat returns the result of appending the flattened value of v to args.
+//
+// Maps are flattened by appending the alternating keys and map values to args.
+//
+// Slices are flattened by appending the slice elements to args.
+//
+// Structs are flattened by appending the alternating names and values of
+// exported fields to args. If v is a nil struct pointer, then nothing is
+// appended. The 'redis' field tag overrides struct field names. See ScanStruct
+// for more information on the use of the 'redis' field tag.
+//
+// Other types are appended to args as is.
+func (args Args) AddFlat(v interface{}) Args {
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Struct:
+ args = flattenStruct(args, rv)
+ case reflect.Slice:
+ for i := 0; i < rv.Len(); i++ {
+ args = append(args, rv.Index(i).Interface())
+ }
+ case reflect.Map:
+ for _, k := range rv.MapKeys() {
+ args = append(args, k.Interface(), rv.MapIndex(k).Interface())
+ }
+ case reflect.Ptr:
+ if rv.Type().Elem().Kind() == reflect.Struct {
+ if !rv.IsNil() {
+ args = flattenStruct(args, rv.Elem())
+ }
+ } else {
+ args = append(args, v)
+ }
+ default:
+ args = append(args, v)
+ }
+ return args
+}
+
+func flattenStruct(args Args, v reflect.Value) Args {
+ ss := structSpecForType(v.Type())
+ for _, fs := range ss.l {
+ fv := v.FieldByIndex(fs.index)
+ args = append(args, fs.name, fv.Interface())
+ }
+ return args
+}
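For reference, a minimal sketch of how these vendored scan helpers fit together, assuming a Redis server reachable at localhost:6379; the `Person` type and the key `person:1` are illustrative. `Args.AddFlat` flattens a struct into alternating field names and values for HMSET, and `ScanStruct` reads the HGETALL reply back into a struct.

```go
package main

import (
	"fmt"
	"log"

	"github.com/garyburd/redigo/redis"
)

type Person struct {
	Name string `redis:"name"`
	Age  int    `redis:"age"`
}

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Flatten the struct into alternating names and values and store it as a hash.
	p := Person{Name: "Lena", Age: 30}
	if _, err := c.Do("HMSET", redis.Args{}.Add("person:1").AddFlat(&p)...); err != nil {
		log.Fatal(err)
	}

	// Read the hash back and scan the reply into a fresh struct.
	v, err := redis.Values(c.Do("HGETALL", "person:1"))
	if err != nil {
		log.Fatal(err)
	}
	var got Person
	if err := redis.ScanStruct(v, &got); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", got) // {Name:Lena Age:30}
}
```

ScanSlice works analogously for flat multi-record replies, filling a slice of structs (or of scalars) instead of a single struct.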
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go
new file mode 100644
index 0000000..78605a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go
@@ -0,0 +1,86 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+// Script encapsulates the source, hash and key count for a Lua script. See
+// http://redis.io/commands/eval for information on scripts in Redis.
+type Script struct {
+ keyCount int
+ src string
+ hash string
+}
+
+// NewScript returns a new script object. If keyCount is greater than or equal
+// to zero, then the count is automatically inserted in the EVAL command
+// argument list. If keyCount is less than zero, then the application supplies
+// the count as the first value in the keysAndArgs argument to the Do, Send and
+// SendHash methods.
+func NewScript(keyCount int, src string) *Script {
+ h := sha1.New()
+ io.WriteString(h, src)
+ return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))}
+}
+
+func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
+ var args []interface{}
+ if s.keyCount < 0 {
+ args = make([]interface{}, 1+len(keysAndArgs))
+ args[0] = spec
+ copy(args[1:], keysAndArgs)
+ } else {
+ args = make([]interface{}, 2+len(keysAndArgs))
+ args[0] = spec
+ args[1] = s.keyCount
+ copy(args[2:], keysAndArgs)
+ }
+ return args
+}
+
+// Do evaluates the script. Under the covers, Do optimistically evaluates the
+// script using the EVALSHA command. If the command fails because the script is
+// not loaded, then Do evaluates the script using the EVAL command (thus
+// causing the script to load).
+func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
+ v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
+ if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
+ v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
+ }
+ return v, err
+}
+
+// SendHash evaluates the script without waiting for the reply. The script is
+// evaluated with the EVALSHA command. The application must ensure that the
+// script is loaded by a previous call to Send, Do or Load methods.
+func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error {
+ return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...)
+}
+
+// Send evaluates the script without waiting for the reply.
+func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error {
+ return c.Send("EVAL", s.args(s.src, keysAndArgs)...)
+}
+
+// Load loads the script without evaluating it.
+func (s *Script) Load(c Conn) error {
+ _, err := c.Do("SCRIPT", "LOAD", s.src)
+ return err
+}
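A short usage sketch for Script, again assuming a local Redis server; the Lua body and the key name "counter" are illustrative. With keyCount set to 1, Do inserts the key count into the EVAL/EVALSHA argument list automatically.

```go
package main

import (
	"fmt"
	"log"

	"github.com/garyburd/redigo/redis"
)

// incrBy increments KEYS[1] by ARGV[1].
var incrBy = redis.NewScript(1, `return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Do tries EVALSHA first and falls back to EVAL on a NOSCRIPT error,
	// which loads the script for subsequent calls.
	n, err := redis.Int64(incrBy.Do(c, "counter", 5))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("counter =", n)
}
```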
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/.gitignore b/Godeps/_workspace/src/github.com/gin-gonic/gin/.gitignore
new file mode 100644
index 0000000..9f48f14
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/.gitignore
@@ -0,0 +1,4 @@
+Godeps/*
+!Godeps/Godeps.json
+coverage.out
+count.out
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/.travis.yml b/Godeps/_workspace/src/github.com/gin-gonic/gin/.travis.yml
new file mode 100644
index 0000000..695f0b7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/.travis.yml
@@ -0,0 +1,22 @@
+language: go
+sudo: false
+go:
+ - 1.4
+ - 1.4.2
+ - tip
+
+script:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - go test -v -covermode=count -coverprofile=coverage.out
+
+after_success:
+ - goveralls -coverprofile=coverage.out -service=travis-ci -repotoken yFj7FrCeddvBzUaaCyG33jCLfWXeb93eA
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/acc2c57482e94b44f557
+ on_success: change # options: [always|never|change] default: always
+ on_failure: always # options: [always|never|change] default: always
+ on_start: false # default: false
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/AUTHORS.md b/Godeps/_workspace/src/github.com/gin-gonic/gin/AUTHORS.md
new file mode 100644
index 0000000..2feaf46
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/AUTHORS.md
@@ -0,0 +1,229 @@
+List of all the awesome people working to make Gin the best Web Framework in Go.
+
+
+
+##gin 0.x series authors
+
+**Maintainer:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho)
+
+People and companies, who have contributed, in alphabetical order.
+
+**@858806258 (杰哥)**
+- Fix typo in example
+
+
+**@achedeuzot (Klemen Sever)**
+- Fix newline debug printing
+
+
+**@adammck (Adam Mckaig)**
+- Add MIT license
+
+
+**@AlexanderChen1989 (Alexander)**
+- Typos in README
+
+
+**@alexanderdidenko (Aleksandr Didenko)**
+- Add support multipart/form-data
+
+
+**@alexandernyquist (Alexander Nyquist)**
+- Using template.Must to fix multiple return issue
+- ★ Added support for OPTIONS verb
+- ★ Setting response headers before calling WriteHeader
+- Improved documentation for model binding
+- ★ Added Content.Redirect()
+- ★ Added tons of Unit tests
+
+
+**@austinheap (Austin Heap)**
+- Added travis CI integration
+
+
+**@andredublin (Andre Dublin)**
+- Fix typo in comment
+
+
+**@bredov (Ludwig Valda Vasquez)**
+- Fix html templating in debug mode
+
+
+**@bluele (Jun Kimura)**
+- Fixes code examples in README
+
+
+**@chad-russell**
+- ★ Support for serializing gin.H into XML
+
+
+**@dickeyxxx (Jeff Dickey)**
+- Typos in README
+- Add example about serving static files
+
+
+**@donileo (Adonis)**
+- Add NoMethod handler
+
+
+**@dutchcoders (DutchCoders)**
+- ★ Fix security bug that allows client to spoof ip
+- Fix typo. r.HTMLTemplates -> SetHTMLTemplate
+
+
+**@el3ctro- (Joshua Loper)**
+- Fix typo in example
+
+
+**@ethankan (Ethan Kan)**
+- Unsigned integers in binding
+
+
+**(Evgeny Persienko)**
+- Validate sub structures
+
+
+**@frankbille (Frank Bille)**
+- Add support for HTTP Realm Auth
+
+
+**@fmd (Fareed Dudhia)**
+- Fix typo. SetHTTPTemplate -> SetHTMLTemplate
+
+
+**@ironiridis (Christopher Harrington)**
+- Remove old reference
+
+
+**@jammie-stackhouse (Jamie Stackhouse)**
+- Add more shortcuts for router methods
+
+
+**@jasonrhansen**
+- Fix spelling and grammar errors in documentation
+
+
+**@JasonSoft (Jason Lee)**
+- Fix typo in comment
+
+
+**@joiggama (Ignacio Galindo)**
+- Add utf-8 charset header on renders
+
+
+**@julienschmidt (Julien Schmidt)**
+- gofmt the code examples
+
+
+**@kelcecil (Kel Cecil)**
+- Fix readme typo
+
+
+**@kyledinh (Kyle Dinh)**
+- Adds RunTLS()
+
+
+**@LinusU (Linus Unnebäck)**
+- Small fixes in README
+
+
+**@loongmxbt (Saint Asky)**
+- Fix typo in example
+
+
+**@lucas-clemente (Lucas Clemente)**
+- ★ work around path.Join removing trailing slashes from routes
+
+
+**@mattn (Yasuhiro Matsumoto)**
+- Improve color logger
+
+
+**@mdigger (Dmitry Sedykh)**
+- Fixes Form binding when content-type is x-www-form-urlencoded
+- No repeat call c.Writer.Status() in gin.Logger
+- Fixes Content-Type for json render
+
+
+**@mirzac (Mirza Ceric)**
+- Fix debug printing
+
+
+**@mopemope (Yutaka Matsubara)**
+- ★ Adds Godep support (Dependencies Manager)
+- Fix variadic parameter in the flexible render API
+- Fix Corrupted plain render
+- Add Pluggable View Renderer Example
+
+
+**@msemenistyi (Mykyta Semenistyi)**
+- update Readme.md. Add code to String method
+
+
+**@msoedov (Sasha Myasoedov)**
+- ★ Adds tons of unit tests.
+
+
+**@ngerakines (Nick Gerakines)**
+- ★ Improves API, c.GET() doesn't panic
+- Adds MustGet() method
+
+
+**@r8k (Rajiv Kilaparti)**
+- Fix Port usage in README.
+
+
+**@rayrod2030 (Ray Rodriguez)**
+- Fix typo in example
+
+
+**@rns**
+- Fix typo in example
+
+
+**@RobAWilkinson (Robert Wilkinson)**
+- Add example of forms and params
+
+
+**@rogierlommers (Rogier Lommers)**
+- Add updated static serve example
+
+
+**@se77en (Damon Zhao)**
+- Improve color logging
+
+
+**@silasb (Silas Baronda)**
+- Fixing quotes in README
+
+
+**@SkuliOskarsson (Skuli Oskarsson)**
+- Fixes some texts in README II
+
+
+**@slimmy (Jimmy Pettersson)**
+- Added messages for required bindings
+
+
+**@smira (Andrey Smirnov)**
+- Add support for ignored/unexported fields in binding
+
+
+**@superalsrk (SRK.Lyu)**
+- Update httprouter godeps
+
+
+**@tebeka (Miki Tebeka)**
+- Use net/http constants instead of numeric values
+
+
+**@techjanitor**
+- Update context.go reserved IPs
+
+
+**@yosssi (Keiji Yoshida)**
+- Fix link in README
+
+
+**@yuyabee**
+- Fixed README
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/BENCHMARKS.md b/Godeps/_workspace/src/github.com/gin-gonic/gin/BENCHMARKS.md
new file mode 100644
index 0000000..181f75b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/BENCHMARKS.md
@@ -0,0 +1,298 @@
+**Machine:** Intel i7 Ivy Bridge quad-core, 8GB RAM.
+**Date:** June 4th, 2015
+[https://github.com/gin-gonic/go-http-routing-benchmark](https://github.com/gin-gonic/go-http-routing-benchmark)
+
+```
+BenchmarkAce_Param 5000000 372 ns/op 32 B/op 1 allocs/op
+BenchmarkBear_Param 1000000 1165 ns/op 424 B/op 5 allocs/op
+BenchmarkBeego_Param 1000000 2440 ns/op 720 B/op 10 allocs/op
+BenchmarkBone_Param 1000000 1067 ns/op 384 B/op 3 allocs/op
+BenchmarkDenco_Param 5000000 240 ns/op 32 B/op 1 allocs/op
+BenchmarkEcho_Param 10000000 130 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_Param 10000000 133 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_Param 1000000 1826 ns/op 656 B/op 9 allocs/op
+BenchmarkGoji_Param 2000000 957 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_Param 1000000 2021 ns/op 657 B/op 14 allocs/op
+BenchmarkGoRestful_Param 200000 8825 ns/op 2496 B/op 31 allocs/op
+BenchmarkGorillaMux_Param 500000 3340 ns/op 784 B/op 9 allocs/op
+BenchmarkHttpRouter_Param 10000000 152 ns/op 32 B/op 1 allocs/op
+BenchmarkHttpTreeMux_Param 2000000 717 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_Param 3000000 423 ns/op 56 B/op 3 allocs/op
+BenchmarkMacaron_Param 1000000 3410 ns/op 1104 B/op 11 allocs/op
+BenchmarkMartini_Param 200000 7101 ns/op 1152 B/op 12 allocs/op
+BenchmarkPat_Param 1000000 2040 ns/op 656 B/op 14 allocs/op
+BenchmarkPossum_Param 1000000 2048 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_Param 1000000 1144 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_Param 200000 6725 ns/op 1672 B/op 28 allocs/op
+BenchmarkRivet_Param 1000000 1121 ns/op 464 B/op 5 allocs/op
+BenchmarkTango_Param 1000000 1479 ns/op 256 B/op 10 allocs/op
+BenchmarkTigerTonic_Param 1000000 3393 ns/op 992 B/op 19 allocs/op
+BenchmarkTraffic_Param 300000 5525 ns/op 1984 B/op 23 allocs/op
+BenchmarkVulcan_Param 2000000 924 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_Param 1000000 1084 ns/op 368 B/op 3 allocs/op
+BenchmarkAce_Param5 3000000 614 ns/op 160 B/op 1 allocs/op
+BenchmarkBear_Param5 1000000 1617 ns/op 469 B/op 5 allocs/op
+BenchmarkBeego_Param5 1000000 3373 ns/op 992 B/op 13 allocs/op
+BenchmarkBone_Param5 1000000 1478 ns/op 432 B/op 3 allocs/op
+BenchmarkDenco_Param5 3000000 570 ns/op 160 B/op 1 allocs/op
+BenchmarkEcho_Param5 5000000 256 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_Param5 10000000 222 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_Param5 1000000 2789 ns/op 928 B/op 12 allocs/op
+BenchmarkGoji_Param5 1000000 1287 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_Param5 1000000 3670 ns/op 1105 B/op 17 allocs/op
+BenchmarkGoRestful_Param5 200000 10756 ns/op 2672 B/op 31 allocs/op
+BenchmarkGorillaMux_Param5 300000 5543 ns/op 912 B/op 9 allocs/op
+BenchmarkHttpRouter_Param5 5000000 403 ns/op 160 B/op 1 allocs/op
+BenchmarkHttpTreeMux_Param5 1000000 1089 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_Param5 1000000 1682 ns/op 440 B/op 10 allocs/op
+BenchmarkMacaron_Param5 300000 4596 ns/op 1376 B/op 14 allocs/op
+BenchmarkMartini_Param5 100000 15703 ns/op 1280 B/op 12 allocs/op
+BenchmarkPat_Param5 300000 5320 ns/op 1008 B/op 42 allocs/op
+BenchmarkPossum_Param5 1000000 2155 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_Param5 1000000 1559 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_Param5 200000 8184 ns/op 2024 B/op 35 allocs/op
+BenchmarkRivet_Param5 1000000 1914 ns/op 528 B/op 9 allocs/op
+BenchmarkTango_Param5 1000000 3280 ns/op 944 B/op 18 allocs/op
+BenchmarkTigerTonic_Param5 200000 11638 ns/op 2519 B/op 53 allocs/op
+BenchmarkTraffic_Param5 200000 8941 ns/op 2280 B/op 31 allocs/op
+BenchmarkVulcan_Param5 1000000 1279 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_Param5 1000000 1574 ns/op 416 B/op 3 allocs/op
+BenchmarkAce_Param20 1000000 1528 ns/op 640 B/op 1 allocs/op
+BenchmarkBear_Param20 300000 4906 ns/op 1633 B/op 5 allocs/op
+BenchmarkBeego_Param20 200000 10529 ns/op 3868 B/op 17 allocs/op
+BenchmarkBone_Param20 300000 7362 ns/op 2539 B/op 5 allocs/op
+BenchmarkDenco_Param20 1000000 1884 ns/op 640 B/op 1 allocs/op
+BenchmarkEcho_Param20 2000000 689 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_Param20 3000000 545 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_Param20 200000 9437 ns/op 3804 B/op 16 allocs/op
+BenchmarkGoji_Param20 500000 3987 ns/op 1246 B/op 2 allocs/op
+BenchmarkGoJsonRest_Param20 100000 12799 ns/op 4492 B/op 21 allocs/op
+BenchmarkGoRestful_Param20 100000 19451 ns/op 5244 B/op 33 allocs/op
+BenchmarkGorillaMux_Param20 100000 12456 ns/op 3275 B/op 11 allocs/op
+BenchmarkHttpRouter_Param20 1000000 1333 ns/op 640 B/op 1 allocs/op
+BenchmarkHttpTreeMux_Param20 300000 6490 ns/op 2187 B/op 4 allocs/op
+BenchmarkKocha_Param20 300000 5335 ns/op 1808 B/op 27 allocs/op
+BenchmarkMacaron_Param20 200000 11325 ns/op 4252 B/op 18 allocs/op
+BenchmarkMartini_Param20 20000 64419 ns/op 3644 B/op 14 allocs/op
+BenchmarkPat_Param20 50000 24672 ns/op 4888 B/op 151 allocs/op
+BenchmarkPossum_Param20 1000000 2085 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_Param20 300000 6809 ns/op 2283 B/op 8 allocs/op
+BenchmarkRevel_Param20 100000 16600 ns/op 5551 B/op 54 allocs/op
+BenchmarkRivet_Param20 200000 8428 ns/op 2620 B/op 26 allocs/op
+BenchmarkTango_Param20 100000 16302 ns/op 8224 B/op 48 allocs/op
+BenchmarkTigerTonic_Param20 30000 46828 ns/op 10538 B/op 178 allocs/op
+BenchmarkTraffic_Param20 50000 28871 ns/op 7998 B/op 66 allocs/op
+BenchmarkVulcan_Param20 1000000 2267 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_Param20 300000 6828 ns/op 2507 B/op 5 allocs/op
+BenchmarkAce_ParamWrite 3000000 502 ns/op 40 B/op 2 allocs/op
+BenchmarkBear_ParamWrite 1000000 1303 ns/op 424 B/op 5 allocs/op
+BenchmarkBeego_ParamWrite 1000000 2489 ns/op 728 B/op 11 allocs/op
+BenchmarkBone_ParamWrite 1000000 1181 ns/op 384 B/op 3 allocs/op
+BenchmarkDenco_ParamWrite 5000000 315 ns/op 32 B/op 1 allocs/op
+BenchmarkEcho_ParamWrite 10000000 237 ns/op 8 B/op 1 allocs/op
+BenchmarkGin_ParamWrite 5000000 336 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_ParamWrite 1000000 2079 ns/op 664 B/op 10 allocs/op
+BenchmarkGoji_ParamWrite 1000000 1092 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_ParamWrite 1000000 3329 ns/op 1136 B/op 19 allocs/op
+BenchmarkGoRestful_ParamWrite 200000 9273 ns/op 2504 B/op 32 allocs/op
+BenchmarkGorillaMux_ParamWrite 500000 3919 ns/op 792 B/op 10 allocs/op
+BenchmarkHttpRouter_ParamWrite 10000000 223 ns/op 32 B/op 1 allocs/op
+BenchmarkHttpTreeMux_ParamWrite 2000000 788 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_ParamWrite 3000000 549 ns/op 56 B/op 3 allocs/op
+BenchmarkMacaron_ParamWrite 500000 4558 ns/op 1216 B/op 16 allocs/op
+BenchmarkMartini_ParamWrite 200000 8850 ns/op 1256 B/op 16 allocs/op
+BenchmarkPat_ParamWrite 500000 3679 ns/op 1088 B/op 19 allocs/op
+BenchmarkPossum_ParamWrite 1000000 2114 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_ParamWrite 1000000 1320 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_ParamWrite 200000 8048 ns/op 2128 B/op 33 allocs/op
+BenchmarkRivet_ParamWrite 1000000 1393 ns/op 472 B/op 6 allocs/op
+BenchmarkTango_ParamWrite 2000000 819 ns/op 136 B/op 5 allocs/op
+BenchmarkTigerTonic_ParamWrite 300000 5860 ns/op 1440 B/op 25 allocs/op
+BenchmarkTraffic_ParamWrite 200000 7429 ns/op 2400 B/op 27 allocs/op
+BenchmarkVulcan_ParamWrite 2000000 972 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_ParamWrite 1000000 1226 ns/op 368 B/op 3 allocs/op
+BenchmarkAce_GithubStatic 5000000 294 ns/op 0 B/op 0 allocs/op
+BenchmarkBear_GithubStatic 3000000 575 ns/op 88 B/op 3 allocs/op
+BenchmarkBeego_GithubStatic 1000000 1561 ns/op 368 B/op 7 allocs/op
+BenchmarkBone_GithubStatic 200000 12301 ns/op 2880 B/op 60 allocs/op
+BenchmarkDenco_GithubStatic 20000000 74.6 ns/op 0 B/op 0 allocs/op
+BenchmarkEcho_GithubStatic 10000000 176 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GithubStatic 10000000 159 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GithubStatic 1000000 1116 ns/op 304 B/op 6 allocs/op
+BenchmarkGoji_GithubStatic 5000000 413 ns/op 0 B/op 0 allocs/op
+BenchmarkGoRestful_GithubStatic 30000 55200 ns/op 3520 B/op 36 allocs/op
+BenchmarkGoJsonRest_GithubStatic 1000000 1504 ns/op 337 B/op 12 allocs/op
+BenchmarkGorillaMux_GithubStatic 100000 23620 ns/op 464 B/op 8 allocs/op
+BenchmarkHttpRouter_GithubStatic 20000000 78.3 ns/op 0 B/op 0 allocs/op
+BenchmarkHttpTreeMux_GithubStatic 20000000 84.9 ns/op 0 B/op 0 allocs/op
+BenchmarkKocha_GithubStatic 20000000 111 ns/op 0 B/op 0 allocs/op
+BenchmarkMacaron_GithubStatic 1000000 2686 ns/op 752 B/op 8 allocs/op
+BenchmarkMartini_GithubStatic 100000 22244 ns/op 832 B/op 11 allocs/op
+BenchmarkPat_GithubStatic 100000 13278 ns/op 3648 B/op 76 allocs/op
+BenchmarkPossum_GithubStatic 1000000 1429 ns/op 480 B/op 4 allocs/op
+BenchmarkR2router_GithubStatic 2000000 726 ns/op 144 B/op 5 allocs/op
+BenchmarkRevel_GithubStatic 300000 6271 ns/op 1288 B/op 25 allocs/op
+BenchmarkRivet_GithubStatic 3000000 474 ns/op 112 B/op 2 allocs/op
+BenchmarkTango_GithubStatic 1000000 1842 ns/op 256 B/op 10 allocs/op
+BenchmarkTigerTonic_GithubStatic 5000000 361 ns/op 48 B/op 1 allocs/op
+BenchmarkTraffic_GithubStatic 30000 47197 ns/op 18920 B/op 149 allocs/op
+BenchmarkVulcan_GithubStatic 1000000 1415 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GithubStatic 1000000 2522 ns/op 512 B/op 11 allocs/op
+BenchmarkAce_GithubParam 3000000 578 ns/op 96 B/op 1 allocs/op
+BenchmarkBear_GithubParam 1000000 1592 ns/op 464 B/op 5 allocs/op
+BenchmarkBeego_GithubParam 1000000 2891 ns/op 784 B/op 11 allocs/op
+BenchmarkBone_GithubParam 300000 6440 ns/op 1456 B/op 16 allocs/op
+BenchmarkDenco_GithubParam 3000000 514 ns/op 128 B/op 1 allocs/op
+BenchmarkEcho_GithubParam 5000000 292 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GithubParam 10000000 242 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GithubParam 1000000 2343 ns/op 720 B/op 10 allocs/op
+BenchmarkGoji_GithubParam 1000000 1566 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_GithubParam 1000000 2828 ns/op 721 B/op 15 allocs/op
+BenchmarkGoRestful_GithubParam 10000 177711 ns/op 2816 B/op 35 allocs/op
+BenchmarkGorillaMux_GithubParam 100000 13591 ns/op 816 B/op 9 allocs/op
+BenchmarkHttpRouter_GithubParam 5000000 352 ns/op 96 B/op 1 allocs/op
+BenchmarkHttpTreeMux_GithubParam 2000000 973 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_GithubParam 2000000 889 ns/op 128 B/op 5 allocs/op
+BenchmarkMacaron_GithubParam 500000 4047 ns/op 1168 B/op 12 allocs/op
+BenchmarkMartini_GithubParam 50000 28982 ns/op 1184 B/op 12 allocs/op
+BenchmarkPat_GithubParam 200000 8747 ns/op 2480 B/op 56 allocs/op
+BenchmarkPossum_GithubParam 1000000 2158 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_GithubParam 1000000 1352 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_GithubParam 200000 7673 ns/op 1784 B/op 30 allocs/op
+BenchmarkRivet_GithubParam 1000000 1573 ns/op 480 B/op 6 allocs/op
+BenchmarkTango_GithubParam 1000000 2418 ns/op 480 B/op 13 allocs/op
+BenchmarkTigerTonic_GithubParam 300000 6048 ns/op 1440 B/op 28 allocs/op
+BenchmarkTraffic_GithubParam 100000 20143 ns/op 6024 B/op 55 allocs/op
+BenchmarkVulcan_GithubParam 1000000 2224 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GithubParam 500000 4156 ns/op 1312 B/op 12 allocs/op
+BenchmarkAce_GithubAll 10000 109482 ns/op 13792 B/op 167 allocs/op
+BenchmarkBear_GithubAll 10000 287490 ns/op 79952 B/op 943 allocs/op
+BenchmarkBeego_GithubAll 3000 562184 ns/op 146272 B/op 2092 allocs/op
+BenchmarkBone_GithubAll 500 2578716 ns/op 648016 B/op 8119 allocs/op
+BenchmarkDenco_GithubAll 20000 94955 ns/op 20224 B/op 167 allocs/op
+BenchmarkEcho_GithubAll 30000 58705 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GithubAll 30000 50991 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GithubAll 5000 449648 ns/op 133280 B/op 1889 allocs/op
+BenchmarkGoji_GithubAll 2000 689748 ns/op 56113 B/op 334 allocs/op
+BenchmarkGoJsonRest_GithubAll 5000 537769 ns/op 135995 B/op 2940 allocs/op
+BenchmarkGoRestful_GithubAll 100 18410628 ns/op 797236 B/op 7725 allocs/op
+BenchmarkGorillaMux_GithubAll 200 8036360 ns/op 153137 B/op 1791 allocs/op
+BenchmarkHttpRouter_GithubAll 20000 63506 ns/op 13792 B/op 167 allocs/op
+BenchmarkHttpTreeMux_GithubAll 10000 165927 ns/op 56112 B/op 334 allocs/op
+BenchmarkKocha_GithubAll 10000 171362 ns/op 23304 B/op 843 allocs/op
+BenchmarkMacaron_GithubAll 2000 817008 ns/op 224960 B/op 2315 allocs/op
+BenchmarkMartini_GithubAll 100 12609209 ns/op 237952 B/op 2686 allocs/op
+BenchmarkPat_GithubAll 300 4830398 ns/op 1504101 B/op 32222 allocs/op
+BenchmarkPossum_GithubAll 10000 301716 ns/op 97440 B/op 812 allocs/op
+BenchmarkR2router_GithubAll 10000 270691 ns/op 77328 B/op 1182 allocs/op
+BenchmarkRevel_GithubAll 1000 1491919 ns/op 345553 B/op 5918 allocs/op
+BenchmarkRivet_GithubAll 10000 283860 ns/op 84272 B/op 1079 allocs/op
+BenchmarkTango_GithubAll 5000 473821 ns/op 87078 B/op 2470 allocs/op
+BenchmarkTigerTonic_GithubAll 2000 1120131 ns/op 241088 B/op 6052 allocs/op
+BenchmarkTraffic_GithubAll 200 8708979 ns/op 2664762 B/op 22390 allocs/op
+BenchmarkVulcan_GithubAll 5000 353392 ns/op 19894 B/op 609 allocs/op
+BenchmarkZeus_GithubAll 2000 944234 ns/op 300688 B/op 2648 allocs/op
+BenchmarkAce_GPlusStatic 5000000 251 ns/op 0 B/op 0 allocs/op
+BenchmarkBear_GPlusStatic 3000000 415 ns/op 72 B/op 3 allocs/op
+BenchmarkBeego_GPlusStatic 1000000 1416 ns/op 352 B/op 7 allocs/op
+BenchmarkBone_GPlusStatic 10000000 192 ns/op 32 B/op 1 allocs/op
+BenchmarkDenco_GPlusStatic 30000000 47.6 ns/op 0 B/op 0 allocs/op
+BenchmarkEcho_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlusStatic 1000000 1035 ns/op 288 B/op 6 allocs/op
+BenchmarkGoji_GPlusStatic 5000000 304 ns/op 0 B/op 0 allocs/op
+BenchmarkGoJsonRest_GPlusStatic 1000000 1286 ns/op 337 B/op 12 allocs/op
+BenchmarkGoRestful_GPlusStatic 200000 9649 ns/op 2160 B/op 30 allocs/op
+BenchmarkGorillaMux_GPlusStatic 1000000 2346 ns/op 464 B/op 8 allocs/op
+BenchmarkHttpRouter_GPlusStatic 30000000 42.7 ns/op 0 B/op 0 allocs/op
+BenchmarkHttpTreeMux_GPlusStatic 30000000 49.5 ns/op 0 B/op 0 allocs/op
+BenchmarkKocha_GPlusStatic 20000000 74.8 ns/op 0 B/op 0 allocs/op
+BenchmarkMacaron_GPlusStatic 1000000 2520 ns/op 736 B/op 8 allocs/op
+BenchmarkMartini_GPlusStatic 300000 5310 ns/op 832 B/op 11 allocs/op
+BenchmarkPat_GPlusStatic 5000000 398 ns/op 96 B/op 2 allocs/op
+BenchmarkPossum_GPlusStatic 1000000 1434 ns/op 480 B/op 4 allocs/op
+BenchmarkR2router_GPlusStatic 2000000 646 ns/op 144 B/op 5 allocs/op
+BenchmarkRevel_GPlusStatic 300000 6172 ns/op 1272 B/op 25 allocs/op
+BenchmarkRivet_GPlusStatic 3000000 444 ns/op 112 B/op 2 allocs/op
+BenchmarkTango_GPlusStatic 1000000 1400 ns/op 208 B/op 10 allocs/op
+BenchmarkTigerTonic_GPlusStatic 10000000 213 ns/op 32 B/op 1 allocs/op
+BenchmarkTraffic_GPlusStatic 1000000 3091 ns/op 1208 B/op 16 allocs/op
+BenchmarkVulcan_GPlusStatic 2000000 863 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GPlusStatic 10000000 237 ns/op 16 B/op 1 allocs/op
+BenchmarkAce_GPlusParam 3000000 435 ns/op 64 B/op 1 allocs/op
+BenchmarkBear_GPlusParam 1000000 1205 ns/op 448 B/op 5 allocs/op
+BenchmarkBeego_GPlusParam 1000000 2494 ns/op 720 B/op 10 allocs/op
+BenchmarkBone_GPlusParam 1000000 1126 ns/op 384 B/op 3 allocs/op
+BenchmarkDenco_GPlusParam 5000000 325 ns/op 64 B/op 1 allocs/op
+BenchmarkEcho_GPlusParam 10000000 168 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlusParam 10000000 170 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlusParam 1000000 1895 ns/op 656 B/op 9 allocs/op
+BenchmarkGoji_GPlusParam 1000000 1071 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_GPlusParam 1000000 2282 ns/op 657 B/op 14 allocs/op
+BenchmarkGoRestful_GPlusParam 100000 19400 ns/op 2560 B/op 33 allocs/op
+BenchmarkGorillaMux_GPlusParam 500000 5001 ns/op 784 B/op 9 allocs/op
+BenchmarkHttpRouter_GPlusParam 10000000 240 ns/op 64 B/op 1 allocs/op
+BenchmarkHttpTreeMux_GPlusParam 2000000 797 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_GPlusParam 3000000 505 ns/op 56 B/op 3 allocs/op
+BenchmarkMacaron_GPlusParam 1000000 3668 ns/op 1104 B/op 11 allocs/op
+BenchmarkMartini_GPlusParam 200000 10672 ns/op 1152 B/op 12 allocs/op
+BenchmarkPat_GPlusParam 1000000 2376 ns/op 704 B/op 14 allocs/op
+BenchmarkPossum_GPlusParam 1000000 2090 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_GPlusParam 1000000 1233 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_GPlusParam 200000 6778 ns/op 1704 B/op 28 allocs/op
+BenchmarkRivet_GPlusParam 1000000 1279 ns/op 464 B/op 5 allocs/op
+BenchmarkTango_GPlusParam 1000000 1981 ns/op 272 B/op 10 allocs/op
+BenchmarkTigerTonic_GPlusParam 500000 3893 ns/op 1064 B/op 19 allocs/op
+BenchmarkTraffic_GPlusParam 200000 6585 ns/op 2000 B/op 23 allocs/op
+BenchmarkVulcan_GPlusParam 1000000 1233 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GPlusParam 1000000 1350 ns/op 368 B/op 3 allocs/op
+BenchmarkAce_GPlus2Params 3000000 512 ns/op 64 B/op 1 allocs/op
+BenchmarkBear_GPlus2Params 1000000 1564 ns/op 464 B/op 5 allocs/op
+BenchmarkBeego_GPlus2Params 1000000 3043 ns/op 784 B/op 11 allocs/op
+BenchmarkBone_GPlus2Params 1000000 3152 ns/op 736 B/op 7 allocs/op
+BenchmarkDenco_GPlus2Params 3000000 431 ns/op 64 B/op 1 allocs/op
+BenchmarkEcho_GPlus2Params 5000000 247 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlus2Params 10000000 219 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlus2Params 1000000 2363 ns/op 720 B/op 10 allocs/op
+BenchmarkGoji_GPlus2Params 1000000 1540 ns/op 336 B/op 2 allocs/op
+BenchmarkGoJsonRest_GPlus2Params 1000000 2872 ns/op 721 B/op 15 allocs/op
+BenchmarkGoRestful_GPlus2Params 100000 23030 ns/op 2720 B/op 35 allocs/op
+BenchmarkGorillaMux_GPlus2Params 200000 10516 ns/op 816 B/op 9 allocs/op
+BenchmarkHttpRouter_GPlus2Params 5000000 273 ns/op 64 B/op 1 allocs/op
+BenchmarkHttpTreeMux_GPlus2Params 2000000 939 ns/op 336 B/op 2 allocs/op
+BenchmarkKocha_GPlus2Params 2000000 844 ns/op 128 B/op 5 allocs/op
+BenchmarkMacaron_GPlus2Params 500000 3914 ns/op 1168 B/op 12 allocs/op
+BenchmarkMartini_GPlus2Params 50000 35759 ns/op 1280 B/op 16 allocs/op
+BenchmarkPat_GPlus2Params 200000 7089 ns/op 2304 B/op 41 allocs/op
+BenchmarkPossum_GPlus2Params 1000000 2093 ns/op 624 B/op 7 allocs/op
+BenchmarkR2router_GPlus2Params 1000000 1320 ns/op 432 B/op 6 allocs/op
+BenchmarkRevel_GPlus2Params 200000 7351 ns/op 1800 B/op 30 allocs/op
+BenchmarkRivet_GPlus2Params 1000000 1485 ns/op 480 B/op 6 allocs/op
+BenchmarkTango_GPlus2Params 1000000 2111 ns/op 448 B/op 12 allocs/op
+BenchmarkTigerTonic_GPlus2Params 300000 6271 ns/op 1528 B/op 28 allocs/op
+BenchmarkTraffic_GPlus2Params 100000 14886 ns/op 3312 B/op 34 allocs/op
+BenchmarkVulcan_GPlus2Params 1000000 1883 ns/op 98 B/op 3 allocs/op
+BenchmarkZeus_GPlus2Params 1000000 2686 ns/op 784 B/op 6 allocs/op
+BenchmarkAce_GPlusAll 300000 5912 ns/op 640 B/op 11 allocs/op
+BenchmarkBear_GPlusAll 100000 16448 ns/op 5072 B/op 61 allocs/op
+BenchmarkBeego_GPlusAll 50000 32916 ns/op 8976 B/op 129 allocs/op
+BenchmarkBone_GPlusAll 50000 25836 ns/op 6992 B/op 76 allocs/op
+BenchmarkDenco_GPlusAll 500000 4462 ns/op 672 B/op 11 allocs/op
+BenchmarkEcho_GPlusAll 500000 2806 ns/op 0 B/op 0 allocs/op
+BenchmarkGin_GPlusAll 500000 2579 ns/op 0 B/op 0 allocs/op
+BenchmarkGocraftWeb_GPlusAll 50000 25223 ns/op 8144 B/op 116 allocs/op
+BenchmarkGoji_GPlusAll 100000 14237 ns/op 3696 B/op 22 allocs/op
+BenchmarkGoJsonRest_GPlusAll 50000 29227 ns/op 8221 B/op 183 allocs/op
+BenchmarkGoRestful_GPlusAll 10000 203144 ns/op 36064 B/op 441 allocs/op
+BenchmarkGorillaMux_GPlusAll 20000 80906 ns/op 9712 B/op 115 allocs/op
+BenchmarkHttpRouter_GPlusAll 500000 3040 ns/op 640 B/op 11 allocs/op
+BenchmarkHttpTreeMux_GPlusAll 200000 9627 ns/op 3696 B/op 22 allocs/op
+BenchmarkKocha_GPlusAll 200000 8108 ns/op 976 B/op 43 allocs/op
+BenchmarkMacaron_GPlusAll 30000 48083 ns/op 13968 B/op 142 allocs/op
+BenchmarkMartini_GPlusAll 10000 196978 ns/op 15072 B/op 178 allocs/op
+BenchmarkPat_GPlusAll 30000 58865 ns/op 16880 B/op 343 allocs/op
+BenchmarkPossum_GPlusAll 100000 19685 ns/op 6240 B/op 52 allocs/op
+BenchmarkR2router_GPlusAll 100000 16251 ns/op 5040 B/op 76 allocs/op
+BenchmarkRevel_GPlusAll 20000 93489 ns/op 21656 B/op 368 allocs/op
+BenchmarkRivet_GPlusAll 100000 16907 ns/op 5408 B/op 64 allocs/op
+```
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/CHANGELOG.md b/Godeps/_workspace/src/github.com/gin-gonic/gin/CHANGELOG.md
new file mode 100644
index 0000000..82f1bea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/CHANGELOG.md
@@ -0,0 +1,150 @@
+#CHANGELOG
+
+###Gin 1.0rc2 (...)
+
+- [PERFORMANCE] Fast path for writing Content-Type.
+- [PERFORMANCE] Much faster 404 routing
+- [PERFORMANCE] Allocation optimizations
+- [PERFORMANCE] Faster root tree lookup
+- [PERFORMANCE] Zero overhead, String() and JSON() rendering.
+- [PERFORMANCE] Faster ClientIP parsing
+- [PERFORMANCE] Much faster SSE implementation
+- [NEW] Benchmarks suite
+- [NEW] Bind validation can be disabled and replaced with custom validators.
+- [NEW] More flexible HTML render
+- [NEW] Multipart and PostForm bindings
+- [NEW] Adds method to return all the registered routes
+- [NEW] Context.HandlerName() returns the main handler's name
+- [NEW] Adds Error.IsType() helper
+- [FIX] Binding multipart form
+- [FIX] Integration tests
+- [FIX] Crash when binding non struct object in Context.
+- [FIX] RunTLS() implementation
+- [FIX] Logger() unit tests
+- [FIX] Adds SetHTMLTemplate() warning
+- [FIX] Context.IsAborted()
+- [FIX] More unit tests
+- [FIX] JSON, XML, HTML renders accept custom content-types
+- [FIX] gin.AbortIndex is unexported
+- [FIX] Better approach to avoid directory listing in StaticFS()
+- [FIX] Context.ClientIP() always returns the IP with trimmed spaces.
+- [FIX] Better warning when running in debug mode.
+- [FIX] Google App Engine integration. debugPrint does not use os.Stdout
+- [FIX] Fixes integer overflow in error type
+- [FIX] Error implements the json.Marshaler interface
+- [FIX] MIT license in every file
+
+
+###Gin 1.0rc1 (May 22, 2015)
+
+- [PERFORMANCE] Zero allocation router
+- [PERFORMANCE] Faster JSON, XML and text rendering
+- [PERFORMANCE] Custom hand optimized HttpRouter for Gin
+- [PERFORMANCE] Misc code optimizations. Inlining, tail call optimizations
+- [NEW] Built-in support for golang.org/x/net/context
+- [NEW] Any(path, handler). Create a route that matches any path
+- [NEW] Refactored rendering pipeline (faster and statically typed)
+- [NEW] Refactored errors API
+- [NEW] IndentedJSON() prints pretty JSON
+- [NEW] Added gin.DefaultWriter
+- [NEW] UNIX socket support
+- [NEW] RouterGroup.BasePath is exposed
+- [NEW] JSON validation using go-validate-yourself (very powerful options)
+- [NEW] Completed suite of unit tests
+- [NEW] HTTP streaming with c.Stream()
+- [NEW] StaticFile() creates a router for serving just one file.
+- [NEW] StaticFS() has an option to disable directory listing.
+- [NEW] StaticFS() for serving static files through virtual filesystems
+- [NEW] Server-Sent Events native support
+- [NEW] WrapF() and WrapH() helpers for wrapping http.HandlerFunc and http.Handler
+- [NEW] Added LoggerWithWriter() middleware
+- [NEW] Added RecoveryWithWriter() middleware
+- [NEW] Added DefaultPostFormValue()
+- [NEW] Added DefaultFormValue()
+- [NEW] Added DefaultParamValue()
+- [FIX] BasicAuth() when using custom realm
+- [FIX] Bug when serving static files in nested routing group
+- [FIX] Redirect using built-in http.Redirect()
+- [FIX] Logger when printing the requested path
+- [FIX] Documentation typos
+- [FIX] Context.Engine renamed to Context.engine
+- [FIX] Better debugging messages
+- [FIX] ErrorLogger
+- [FIX] Debug HTTP render
+- [FIX] Refactored binding and render modules
+- [FIX] Refactored Context initialization
+- [FIX] Refactored BasicAuth()
+- [FIX] NoMethod/NoRoute handlers
+- [FIX] Hijacking http
+- [FIX] Better support for Google App Engine (using log instead of fmt)
+
+
+###Gin 0.6 (Mar 9, 2015)
+
+- [NEW] Support multipart/form-data
+- [NEW] NoMethod handler
+- [NEW] Validate sub structures
+- [NEW] Support for HTTP Realm Auth
+- [FIX] Unsigned integers in binding
+- [FIX] Improve color logger
+
+
+###Gin 0.5 (Feb 7, 2015)
+
+- [NEW] Content Negotiation
+- [FIX] Solved security bug that allowed a client to spoof the IP
+- [FIX] Fix unexported/ignored fields in binding
+
+
+###Gin 0.4 (Aug 21, 2014)
+
+- [NEW] Development mode
+- [NEW] Unit tests
+- [NEW] Add Content.Redirect()
+- [FIX] Deferring WriteHeader()
+- [FIX] Improved documentation for model binding
+
+
+###Gin 0.3 (Jul 18, 2014)
+
+- [PERFORMANCE] Normal log and error log are printed in the same call.
+- [PERFORMANCE] Improve performance of NoRoute()
+- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults.
+- [NEW] Flexible rendering API
+- [NEW] Add Context.File()
+- [NEW] Add shortcut RunTLS() for http.ListenAndServeTLS
+- [FIX] Rename NotFound404() to NoRoute()
+- [FIX] Errors in context are purged
+- [FIX] Adds HEAD method in Static file serving
+- [FIX] Refactors Static() file serving
+- [FIX] Using keyed initialization to fix app-engine integration
+- [FIX] Can't unmarshal JSON array, #63
+- [FIX] Renaming Context.Req to Context.Request
+- [FIX] Check application/x-www-form-urlencoded when parsing form
+
+
+###Gin 0.2b (Jul 08, 2014)
+- [PERFORMANCE] Using sync.Pool to reduce allocation/GC overhead
+- [NEW] Travis CI integration
+- [NEW] Completely new logger
+- [NEW] New API for serving static files. gin.Static()
+- [NEW] gin.H() can be serialized into XML
+- [NEW] Typed errors. Errors can be typed. Internal/external/custom.
+- [NEW] Support for Godeps
+- [NEW] Travis/Godocs badges in README
+- [NEW] New Bind() and BindWith() methods for parsing request body.
+- [NEW] Add Content.Copy()
+- [NEW] Add context.LastError()
+- [NEW] Add shortcut for OPTIONS HTTP method
+- [FIX] Tons of README fixes
+- [FIX] Header is written before body
+- [FIX] BasicAuth(), with small API changes
+- [FIX] Recovery() middleware only prints panics
+- [FIX] Context.Get() does not panic anymore. Use MustGet() instead.
+- [FIX] Multiple http.WriteHeader() in NotFound handlers
+- [FIX] Engine.Run() panics if http server can't be set up
+- [FIX] Crash when route path doesn't start with '/'
+- [FIX] Do not update header when status code is negative
+- [FIX] Setting response headers before calling WriteHeader in context.String()
+- [FIX] Add MIT license
+- [FIX] Changes behaviour of ErrorLogger() and Logger()
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/LICENSE b/Godeps/_workspace/src/github.com/gin-gonic/gin/LICENSE
new file mode 100644
index 0000000..1ff7f37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Manuel Martínez-Almeida
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/README.md b/Godeps/_workspace/src/github.com/gin-gonic/gin/README.md
new file mode 100644
index 0000000..2a111d2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/README.md
@@ -0,0 +1,677 @@
+
+#Gin Web Framework
+
+[![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin)
+[![Coverage Status](https://coveralls.io/repos/gin-gonic/gin/badge.svg?branch=master)](https://coveralls.io/r/gin-gonic/gin?branch=master)
+[![GoDoc](https://godoc.org/github.com/gin-gonic/gin?status.svg)](https://godoc.org/github.com/gin-gonic/gin)
+[![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+Gin is a web framework written in Go. It features a Martini-like API with much better performance, up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin.
+
+
+
+![Gin console logger](https://gin-gonic.github.io/gin/other/console.png)
+
+```sh
+$ cat test.go
+```
+```go
+package main
+
+import "github.com/gin-gonic/gin"
+
+func main() {
+ r := gin.Default()
+ r.GET("/ping", func(c *gin.Context) {
+ c.JSON(200, gin.H{
+ "message": "hello world",
+ })
+ })
+ r.Run() // listen and serve on 0.0.0.0:8080
+}
+```
+
+## Benchmarks
+
+Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter)
+
+[See all benchmarks](/BENCHMARKS.md)
+
+
+Benchmark name | (1) | (2) | (3) | (4)
+--------------------------------|----------:|----------:|----------:|------:
+BenchmarkAce_GithubAll | 10000 | 109482 | 13792 | 167
+BenchmarkBear_GithubAll | 10000 | 287490 | 79952 | 943
+BenchmarkBeego_GithubAll | 3000 | 562184 | 146272 | 2092
+BenchmarkBone_GithubAll | 500 | 2578716 | 648016 | 8119
+BenchmarkDenco_GithubAll | 20000 | 94955 | 20224 | 167
+BenchmarkEcho_GithubAll | 30000 | 58705 | 0 | 0
+**BenchmarkGin_GithubAll** | **30000** | **50991** | **0** | **0**
+BenchmarkGocraftWeb_GithubAll | 5000 | 449648 | 133280 | 1889
+BenchmarkGoji_GithubAll | 2000 | 689748 | 56113 | 334
+BenchmarkGoJsonRest_GithubAll | 5000 | 537769 | 135995 | 2940
+BenchmarkGoRestful_GithubAll | 100 | 18410628 | 797236 | 7725
+BenchmarkGorillaMux_GithubAll | 200 | 8036360 | 153137 | 1791
+BenchmarkHttpRouter_GithubAll | 20000 | 63506 | 13792 | 167
+BenchmarkHttpTreeMux_GithubAll | 10000 | 165927 | 56112 | 334
+BenchmarkKocha_GithubAll | 10000 | 171362 | 23304 | 843
+BenchmarkMacaron_GithubAll | 2000 | 817008 | 224960 | 2315
+BenchmarkMartini_GithubAll | 100 | 12609209 | 237952 | 2686
+BenchmarkPat_GithubAll | 300 | 4830398 | 1504101 | 32222
+BenchmarkPossum_GithubAll | 10000 | 301716 | 97440 | 812
+BenchmarkR2router_GithubAll | 10000 | 270691 | 77328 | 1182
+BenchmarkRevel_GithubAll | 1000 | 1491919 | 345553 | 5918
+BenchmarkRivet_GithubAll | 10000 | 283860 | 84272 | 1079
+BenchmarkTango_GithubAll | 5000 | 473821 | 87078 | 2470
+BenchmarkTigerTonic_GithubAll | 2000 | 1120131 | 241088 | 6052
+BenchmarkTraffic_GithubAll | 200 | 8708979 | 2664762 | 22390
+BenchmarkVulcan_GithubAll | 5000 | 353392 | 19894 | 609
+BenchmarkZeus_GithubAll | 2000 | 944234 | 300688 | 2648
+
+(1): Total Repetitions
+(2): Single Repetition Duration (ns/op)
+(3): Heap Memory (B/op)
+(4): Average Allocations per Repetition (allocs/op)
+
+##Gin v1. stable
+
+- [x] Zero allocation router.
+- [x] Still the fastest http router and framework. From routing to writing.
+- [x] Complete suite of unit tests
+- [x] Battle tested
+- [x] API frozen, new releases will not break your code.
+
+
+## Start using it
+1. Download and install it:
+
+```sh
+$ go get github.com/gin-gonic/gin
+```
+2. Import it in your code:
+
+```go
+import "github.com/gin-gonic/gin"
+```
+
+##API Examples
+
+#### Using GET, POST, PUT, PATCH, DELETE and OPTIONS
+
+```go
+func main() {
+ // Creates a gin router with default middleware:
+ // logger and recovery (crash-free) middleware
+ router := gin.Default()
+
+ router.GET("/someGet", getting)
+ router.POST("/somePost", posting)
+ router.PUT("/somePut", putting)
+ router.DELETE("/someDelete", deleting)
+ router.PATCH("/somePatch", patching)
+ router.HEAD("/someHead", head)
+ router.OPTIONS("/someOptions", options)
+
+ // By default it serves on :8080 unless a
+ // PORT environment variable was defined.
+ router.Run()
+ // router.Run.Run(":3000") for a hard coded port
+}
+```
+
+#### Parameters in path
+
+```go
+func main() {
+ router := gin.Default()
+
+ // This handler will match /user/john but will not match /user/ or /user
+ router.GET("/user/:name", func(c *gin.Context) {
+ name := c.Param("name")
+ c.String(http.StatusOK, "Hello %s", name)
+ })
+
+ // However, this one will match /user/john/ and also /user/john/send
+ // If no other routes match /user/john, it will redirect to /user/john/
+ router.GET("/user/:name/*action", func(c *gin.Context) {
+ name := c.Param("name")
+ action := c.Param("action")
+ message := name + " is " + action
+ c.String(http.StatusOK, message)
+ })
+
+ router.Run(":8080")
+}
+```
+
+#### Querystring parameters
+```go
+func main() {
+ router := gin.Default()
+
+ // Query string parameters are parsed using the existing underlying request object.
+ // The request responds to a URL matching: /welcome?firstname=Jane&lastname=Doe
+ router.GET("/welcome", func(c *gin.Context) {
+ firstname := c.DefaultQuery("firstname", "Guest")
+ lastname := c.Query("lastname") // shortcut for c.Request.URL.Query().Get("lastname")
+
+ c.String(http.StatusOK, "Hello %s %s", firstname, lastname)
+ })
+ router.Run(":8080")
+}
+```
+
+### Multipart/Urlencoded Form
+
+```go
+func main() {
+ router := gin.Default()
+
+ router.POST("/form_post", func(c *gin.Context) {
+ message := c.PostForm("message")
+ nick := c.DefaultPostForm("nick", "anonymous")
+
+ c.JSON(200, gin.H{
+ "status": "posted",
+ "message": message,
+ "nick": nick,
+ })
+ })
+ router.Run(":8080")
+}
+```
+
+### Another example: query + post form
+
+```
+POST /post?id=1234&page=1 HTTP/1.1
+Content-Type: application/x-www-form-urlencoded
+
+name=manu&message=this_is_great
+```
+
+```go
+func main() {
+ router := gin.Default()
+
+ router.POST("/post", func(c *gin.Context) {
+
+ id := c.Query("id")
+ page := c.DefaultQuery("page", "0")
+ name := c.PostForm("name")
+ message := c.PostForm("message")
+
+ fmt.Printf("id: %s; page: %s; name: %s; message: %s", id, page, name, message)
+ })
+ router.Run(":8080")
+}
+```
+
+```
+id: 1234; page: 1; name: manu; message: this_is_great
+```
+
+
+#### Grouping routes
+```go
+func main() {
+ router := gin.Default()
+
+ // Simple group: v1
+ v1 := router.Group("/v1")
+ {
+ v1.POST("/login", loginEndpoint)
+ v1.POST("/submit", submitEndpoint)
+ v1.POST("/read", readEndpoint)
+ }
+
+ // Simple group: v2
+ v2 := router.Group("/v2")
+ {
+ v2.POST("/login", loginEndpoint)
+ v2.POST("/submit", submitEndpoint)
+ v2.POST("/read", readEndpoint)
+ }
+
+ router.Run(":8080")
+}
+```
+
+
+#### Blank Gin without middleware by default
+
+Use
+
+```go
+r := gin.New()
+```
+instead of
+
+```go
+r := gin.Default()
+```
+
+
+#### Using middleware
+```go
+func main() {
+ // Creates a router without any middleware by default
+ r := gin.New()
+
+ // Global middleware
+ r.Use(gin.Logger())
+ r.Use(gin.Recovery())
+
+ // Per-route middleware: you can add as many as you desire.
+ r.GET("/benchmark", MyBenchLogger(), benchEndpoint)
+
+ // Authorization group
+ // authorized := r.Group("/", AuthRequired())
+ // exactly the same as:
+ authorized := r.Group("/")
+ // per-group middleware! in this case we use the custom
+ // AuthRequired() middleware only in the "authorized" group.
+ authorized.Use(AuthRequired())
+ {
+ authorized.POST("/login", loginEndpoint)
+ authorized.POST("/submit", submitEndpoint)
+ authorized.POST("/read", readEndpoint)
+
+ // nested group
+ testing := authorized.Group("testing")
+ testing.GET("/analytics", analyticsEndpoint)
+ }
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+#### Model binding and validation
+
+To bind a request body into a type, use model binding. We currently support binding of JSON, XML and standard form values (foo=bar&boo=baz).
+
+Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`.
+
+When using the Bind method, Gin tries to infer the binder from the Content-Type header. If you are sure of what you are binding, you can use BindWith.
+
+You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has an empty value when binding, the current request will fail with an error.
+
+```go
+// Binding from JSON
+type Login struct {
+ User string `form:"user" json:"user" binding:"required"`
+ Password string `form:"password" json:"password" binding:"required"`
+}
+
+func main() {
+ router := gin.Default()
+
+ // Example for binding JSON ({"user": "manu", "password": "123"})
+ router.POST("/loginJSON", func(c *gin.Context) {
+ var json Login
+ if c.BindJSON(&json) == nil {
+ if json.User == "manu" && json.Password == "123" {
+ c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
+ } else {
+ c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
+ }
+ }
+ })
+
+ // Example for binding an HTML form (user=manu&password=123)
+ router.POST("/loginForm", func(c *gin.Context) {
+ var form Login
+ // This will infer what binder to use depending on the content-type header.
+ if c.Bind(&form) == nil {
+ if form.User == "manu" && form.Password == "123" {
+ c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
+ } else {
+ c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
+ }
+ }
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ router.Run(":8080")
+}
+```
+
+
+###Multipart/Urlencoded binding
+```go
+package main
+
+import (
+ "github.com/gin-gonic/gin"
+ "github.com/gin-gonic/gin/binding"
+)
+
+type LoginForm struct {
+ User string `form:"user" binding:"required"`
+ Password string `form:"password" binding:"required"`
+}
+
+func main() {
+ router := gin.Default()
+ router.POST("/login", func(c *gin.Context) {
+ // you can bind multipart form with explicit binding declaration:
+ // c.BindWith(&form, binding.Form)
+ // or you can simply use autobinding with Bind method:
+ var form LoginForm
+ // in this case proper binding will be automatically selected
+ if c.Bind(&form) == nil {
+ if form.User == "user" && form.Password == "password" {
+ c.JSON(200, gin.H{"status": "you are logged in"})
+ } else {
+ c.JSON(401, gin.H{"status": "unauthorized"})
+ }
+ }
+ })
+ router.Run(":8080")
+}
+```
+
+Test it with:
+```sh
+$ curl -v --form user=user --form password=password http://localhost:8080/login
+```
+
+
+#### XML and JSON rendering
+
+```go
+func main() {
+ r := gin.Default()
+
+ // gin.H is a shortcut for map[string]interface{}
+ r.GET("/someJSON", func(c *gin.Context) {
+ c.JSON(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK})
+ })
+
+ r.GET("/moreJSON", func(c *gin.Context) {
+ // You also can use a struct
+ var msg struct {
+ Name string `json:"user"`
+ Message string
+ Number int
+ }
+ msg.Name = "Lena"
+ msg.Message = "hey"
+ msg.Number = 123
+ // Note that msg.Name becomes "user" in the JSON
+ // Will output : {"user": "Lena", "Message": "hey", "Number": 123}
+ c.JSON(http.StatusOK, msg)
+ })
+
+ r.GET("/someXML", func(c *gin.Context) {
+ c.XML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK})
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+####Serving static files
+
+```go
+func main() {
+ router := gin.Default()
+ router.Static("/assets", "./assets")
+ router.StaticFS("/more_static", http.Dir("my_file_system"))
+ router.StaticFile("/favicon.ico", "./resources/favicon.ico")
+
+ // Listen and serve on 0.0.0.0:8080
+ router.Run(":8080")
+}
+```
+
+####HTML rendering
+
+Using LoadHTMLGlob() or LoadHTMLFiles()
+
+```go
+func main() {
+ router := gin.Default()
+ router.LoadHTMLGlob("templates/*")
+ //router.LoadHTMLFiles("templates/template1.html", "templates/template2.html")
+ router.GET("/index", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "index.tmpl", gin.H{
+ "title": "Main website",
+ })
+ })
+ router.Run(":8080")
+}
+```
+templates/index.tmpl
+```html
+<html>
+  <h1>
+    {{ .title }}
+  </h1>
+</html>
+```
+
+Using templates with same name in different directories
+
+```go
+func main() {
+ router := gin.Default()
+ router.LoadHTMLGlob("templates/**/*")
+ router.GET("/posts/index", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "posts/index.tmpl", gin.H{
+ "title": "Posts",
+ })
+ })
+ router.GET("/users/index", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "users/index.tmpl", gin.H{
+ "title": "Users",
+ })
+ })
+ router.Run(":8080")
+}
+```
+templates/posts/index.tmpl
+```html
+{{ define "posts/index.tmpl" }}
+<html><h1>
+  {{ .title }}
+</h1>
+<p>Using posts/index.tmpl</p>
+</html>
+{{ end }}
+```
+templates/users/index.tmpl
+```html
+{{ define "users/index.tmpl" }}
+<html><h1>
+  {{ .title }}
+</h1>
+<p>Using users/index.tmpl</p>
+</html>
+{{ end }}
+```
+
+You can also use your own HTML template renderer
+
+```go
+import "html/template"
+
+func main() {
+ router := gin.Default()
+ html := template.Must(template.ParseFiles("file1", "file2"))
+ router.SetHTMLTemplate(html)
+ router.Run(":8080")
+}
+```
+
+
+#### Redirects
+
+Issuing an HTTP redirect is easy:
+
+```go
+r.GET("/test", func(c *gin.Context) {
+ c.Redirect(http.StatusMovedPermanently, "http://www.google.com/")
+})
+```
+Both internal and external locations are supported.
+
+
+#### Custom Middleware
+
+```go
+func Logger() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ t := time.Now()
+
+ // Set example variable
+ c.Set("example", "12345")
+
+ // before request
+
+ c.Next()
+
+ // after request
+ latency := time.Since(t)
+ log.Print(latency)
+
+ // access the status we are sending
+ status := c.Writer.Status()
+ log.Println(status)
+ }
+}
+
+func main() {
+ r := gin.New()
+ r.Use(Logger())
+
+ r.GET("/test", func(c *gin.Context) {
+ example := c.MustGet("example").(string)
+
+ // it would print: "12345"
+ log.Println(example)
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+#### Using BasicAuth() middleware
+```go
+// simulate some private data
+var secrets = gin.H{
+ "foo": gin.H{"email": "foo@bar.com", "phone": "123433"},
+ "austin": gin.H{"email": "austin@example.com", "phone": "666"},
+ "lena": gin.H{"email": "lena@guapa.com", "phone": "523443"},
+}
+
+func main() {
+ r := gin.Default()
+
+ // Group using gin.BasicAuth() middleware
+ // gin.Accounts is a shortcut for map[string]string
+ authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{
+ "foo": "bar",
+ "austin": "1234",
+ "lena": "hello2",
+ "manu": "4321",
+ }))
+
+ // /admin/secrets endpoint
+ // hit "localhost:8080/admin/secrets
+ authorized.GET("/secrets", func(c *gin.Context) {
+ // get user, it was setted by the BasicAuth middleware
+ user := c.MustGet(gin.AuthUserKey).(string)
+ if secret, ok := secrets[user]; ok {
+ c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret})
+ } else {
+ c.JSON(http.StatusOK, gin.H{"user": user, "secret": "NO SECRET :("})
+ }
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+
+#### Goroutines inside a middleware
+When starting new goroutines inside a middleware or handler, you **SHOULD NOT** use the original context inside them; you have to use a read-only copy.
+
+```go
+func main() {
+ r := gin.Default()
+
+ r.GET("/long_async", func(c *gin.Context) {
+ // create copy to be used inside the goroutine
+ cCp := c.Copy()
+ go func() {
+ // simulate a long task with time.Sleep(). 5 seconds
+ time.Sleep(5 * time.Second)
+
+ // note that you must use the copied context "cCp", IMPORTANT
+ log.Println("Done! in path " + cCp.Request.URL.Path)
+ }()
+ })
+
+ r.GET("/long_sync", func(c *gin.Context) {
+ // simulate a long task with time.Sleep(). 5 seconds
+ time.Sleep(5 * time.Second)
+
+ // since we are NOT using a goroutine, we do not have to copy the context
+ log.Println("Done! in path " + c.Request.URL.Path)
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+#### Custom HTTP configuration
+
+Use `http.ListenAndServe()` directly, like this:
+
+```go
+func main() {
+ router := gin.Default()
+ http.ListenAndServe(":8080", router)
+}
+```
+or
+
+```go
+func main() {
+ router := gin.Default()
+
+ s := &http.Server{
+ Addr: ":8080",
+ Handler: router,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ MaxHeaderBytes: 1 << 20,
+ }
+ s.ListenAndServe()
+}
+```
+
+#### Graceful restart or stop
+
+Do you want to gracefully restart or stop your web server?
+There are a few ways.
+
+We can use fvbock/endless to replace the default ListenAndServe.
+
+Refer to the issue for more details:
+
+https://github.com/gin-gonic/gin/issues/296
+
+```go
+router := gin.Default()
+router.GET("/", handler)
+// [...]
+endless.ListenAndServe(":4242", router)
+
+```
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/auth.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/auth.go
new file mode 100644
index 0000000..125e659
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/auth.go
@@ -0,0 +1,92 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "crypto/subtle"
+ "encoding/base64"
+ "strconv"
+)
+
+const AuthUserKey = "user"
+
+type (
+ Accounts map[string]string
+ authPair struct {
+ Value string
+ User string
+ }
+ authPairs []authPair
+)
+
+func (a authPairs) searchCredential(authValue string) (string, bool) {
+ if len(authValue) == 0 {
+ return "", false
+ }
+ for _, pair := range a {
+ if pair.Value == authValue {
+ return pair.User, true
+ }
+ }
+ return "", false
+}
+
+// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where
+// the key is the user name and the value is the password, as well as the name of the Realm.
+// If the realm is empty, "Authorization Required" will be used by default.
+// (see http://tools.ietf.org/html/rfc2617#section-1.2)
+func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc {
+ if realm == "" {
+ realm = "Authorization Required"
+ }
+ realm = "Basic realm=" + strconv.Quote(realm)
+ pairs := processAccounts(accounts)
+ return func(c *Context) {
+ // Search user in the slice of allowed credentials
+ user, found := pairs.searchCredential(c.Request.Header.Get("Authorization"))
+ if !found {
+			// The credentials don't match; return 401 and abort the handlers chain.
+ c.Header("WWW-Authenticate", realm)
+ c.AbortWithStatus(401)
+ } else {
+			// The user credentials were found; set the user's id under key AuthUserKey in this context. The user id can be read later using
+ // c.MustGet(gin.AuthUserKey)
+ c.Set(AuthUserKey, user)
+ }
+ }
+}
+
+// BasicAuth returns a Basic HTTP Authorization middleware. It takes as argument a map[string]string where
+// the key is the user name and the value is the password.
+func BasicAuth(accounts Accounts) HandlerFunc {
+ return BasicAuthForRealm(accounts, "")
+}
+
+func processAccounts(accounts Accounts) authPairs {
+ assert1(len(accounts) > 0, "Empty list of authorized credentials")
+ pairs := make(authPairs, 0, len(accounts))
+ for user, password := range accounts {
+ assert1(len(user) > 0, "User can not be empty")
+ value := authorizationHeader(user, password)
+ pairs = append(pairs, authPair{
+ Value: value,
+ User: user,
+ })
+ }
+ return pairs
+}
+
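+// authorizationHeader builds the value expected in the Authorization header for
+// the given credentials, e.g. authorizationHeader("foo", "bar") returns
+// "Basic Zm9vOmJhcg==".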
+func authorizationHeader(user, password string) string {
+ base := user + ":" + password
+ return "Basic " + base64.StdEncoding.EncodeToString([]byte(base))
+}
+
+func secureCompare(given, actual string) bool {
+ if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 {
+ return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1
+ }
+ /* Securely compare actual to itself to keep constant time, but always return false */
+ return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/binding.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/binding.go
new file mode 100644
index 0000000..dc7397f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/binding.go
@@ -0,0 +1,67 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import "net/http"
+
+const (
+ MIMEJSON = "application/json"
+ MIMEHTML = "text/html"
+ MIMEXML = "application/xml"
+ MIMEXML2 = "text/xml"
+ MIMEPlain = "text/plain"
+ MIMEPOSTForm = "application/x-www-form-urlencoded"
+ MIMEMultipartPOSTForm = "multipart/form-data"
+ MIMEPROTOBUF = "application/x-protobuf"
+)
+
+type Binding interface {
+ Name() string
+ Bind(*http.Request, interface{}) error
+}
+
+type StructValidator interface {
+ // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right.
+ // If the received type is not a struct, any validation should be skipped and nil must be returned.
+ // If the received type is a struct or pointer to a struct, the validation should be performed.
+ // If the struct is not valid or the validation itself fails, a descriptive error should be returned.
+ // Otherwise nil must be returned.
+ ValidateStruct(interface{}) error
+}
+
+var Validator StructValidator = &defaultValidator{}
+
+var (
+ JSON = jsonBinding{}
+ XML = xmlBinding{}
+ Form = formBinding{}
+ FormPost = formPostBinding{}
+ FormMultipart = formMultipartBinding{}
+ ProtoBuf = protobufBinding{}
+)
+
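+// Default returns the Binding matching the HTTP method and Content-Type:
+// GET requests always use the Form binding; otherwise the Content-Type selects
+// JSON, XML or ProtoBuf, with Form as the fallback.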
+func Default(method, contentType string) Binding {
+ if method == "GET" {
+ return Form
+ } else {
+ switch contentType {
+ case MIMEJSON:
+ return JSON
+ case MIMEXML, MIMEXML2:
+ return XML
+ case MIMEPROTOBUF:
+ return ProtoBuf
+ default: //case MIMEPOSTForm, MIMEMultipartPOSTForm:
+ return Form
+ }
+ }
+}
+
+func validate(obj interface{}) error {
+ if Validator == nil {
+ return nil
+ }
+ return Validator.ValidateStruct(obj)
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/default_validator.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/default_validator.go
new file mode 100644
index 0000000..760728b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/default_validator.go
@@ -0,0 +1,41 @@
+package binding
+
+import (
+ "reflect"
+ "sync"
+
+ "gopkg.in/go-playground/validator.v8"
+)
+
+type defaultValidator struct {
+ once sync.Once
+ validate *validator.Validate
+}
+
+var _ StructValidator = &defaultValidator{}
+
+func (v *defaultValidator) ValidateStruct(obj interface{}) error {
+ if kindOfData(obj) == reflect.Struct {
+ v.lazyinit()
+ if err := v.validate.Struct(obj); err != nil {
+ return error(err)
+ }
+ }
+ return nil
+}
+
+func (v *defaultValidator) lazyinit() {
+ v.once.Do(func() {
+ config := &validator.Config{TagName: "binding"}
+ v.validate = validator.New(config)
+ })
+}
+
+func kindOfData(data interface{}) reflect.Kind {
+ value := reflect.ValueOf(data)
+ valueType := value.Kind()
+ if valueType == reflect.Ptr {
+ valueType = value.Elem().Kind()
+ }
+ return valueType
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/form.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/form.go
new file mode 100644
index 0000000..557333e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/form.go
@@ -0,0 +1,54 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import "net/http"
+
+type formBinding struct{}
+type formPostBinding struct{}
+type formMultipartBinding struct{}
+
+func (formBinding) Name() string {
+ return "form"
+}
+
+func (formBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseForm(); err != nil {
+ return err
+ }
+	req.ParseMultipartForm(32 << 10) // 32 KB
+ if err := mapForm(obj, req.Form); err != nil {
+ return err
+ }
+ return validate(obj)
+}
+
+func (formPostBinding) Name() string {
+ return "form-urlencoded"
+}
+
+func (formPostBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseForm(); err != nil {
+ return err
+ }
+ if err := mapForm(obj, req.PostForm); err != nil {
+ return err
+ }
+ return validate(obj)
+}
+
+func (formMultipartBinding) Name() string {
+ return "multipart/form-data"
+}
+
+func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseMultipartForm(32 << 10); err != nil {
+ return err
+ }
+ if err := mapForm(obj, req.MultipartForm.Value); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/form_mapping.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/form_mapping.go
new file mode 100644
index 0000000..07c8375
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/form_mapping.go
@@ -0,0 +1,150 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "errors"
+ "reflect"
+ "strconv"
+)
+
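+// mapForm fills the struct pointed to by ptr from the given form values,
+// matching each settable field by its "form" tag (or by the field name when
+// the tag is missing). For example, a field declared as
+//     User string `form:"user"`
+// is populated from form["user"].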
+func mapForm(ptr interface{}, form map[string][]string) error {
+ typ := reflect.TypeOf(ptr).Elem()
+ val := reflect.ValueOf(ptr).Elem()
+ for i := 0; i < typ.NumField(); i++ {
+ typeField := typ.Field(i)
+ structField := val.Field(i)
+ if !structField.CanSet() {
+ continue
+ }
+
+ structFieldKind := structField.Kind()
+ inputFieldName := typeField.Tag.Get("form")
+ if inputFieldName == "" {
+ inputFieldName = typeField.Name
+
+ // if "form" tag is nil, we inspect if the field is a struct.
+ // this would not make sense for JSON parsing but it does for a form
+ // since data is flatten
+ if structFieldKind == reflect.Struct {
+ err := mapForm(structField.Addr().Interface(), form)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ }
+ inputValue, exists := form[inputFieldName]
+ if !exists {
+ continue
+ }
+
+ numElems := len(inputValue)
+ if structFieldKind == reflect.Slice && numElems > 0 {
+ sliceOf := structField.Type().Elem().Kind()
+ slice := reflect.MakeSlice(structField.Type(), numElems, numElems)
+ for i := 0; i < numElems; i++ {
+ if err := setWithProperType(sliceOf, inputValue[i], slice.Index(i)); err != nil {
+ return err
+ }
+ }
+ val.Field(i).Set(slice)
+ } else {
+ if err := setWithProperType(typeField.Type.Kind(), inputValue[0], structField); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error {
+ switch valueKind {
+ case reflect.Int:
+ return setIntField(val, 0, structField)
+ case reflect.Int8:
+ return setIntField(val, 8, structField)
+ case reflect.Int16:
+ return setIntField(val, 16, structField)
+ case reflect.Int32:
+ return setIntField(val, 32, structField)
+ case reflect.Int64:
+ return setIntField(val, 64, structField)
+ case reflect.Uint:
+ return setUintField(val, 0, structField)
+ case reflect.Uint8:
+ return setUintField(val, 8, structField)
+ case reflect.Uint16:
+ return setUintField(val, 16, structField)
+ case reflect.Uint32:
+ return setUintField(val, 32, structField)
+ case reflect.Uint64:
+ return setUintField(val, 64, structField)
+ case reflect.Bool:
+ return setBoolField(val, structField)
+ case reflect.Float32:
+ return setFloatField(val, 32, structField)
+ case reflect.Float64:
+ return setFloatField(val, 64, structField)
+ case reflect.String:
+ structField.SetString(val)
+ default:
+ return errors.New("Unknown type")
+ }
+ return nil
+}
+
+func setIntField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0"
+ }
+ intVal, err := strconv.ParseInt(val, 10, bitSize)
+ if err == nil {
+ field.SetInt(intVal)
+ }
+ return err
+}
+
+func setUintField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0"
+ }
+ uintVal, err := strconv.ParseUint(val, 10, bitSize)
+ if err == nil {
+ field.SetUint(uintVal)
+ }
+ return err
+}
+
+func setBoolField(val string, field reflect.Value) error {
+ if val == "" {
+ val = "false"
+ }
+ boolVal, err := strconv.ParseBool(val)
+ if err == nil {
+ field.SetBool(boolVal)
+ }
+	return err
+}
+
+func setFloatField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0.0"
+ }
+ floatVal, err := strconv.ParseFloat(val, bitSize)
+ if err == nil {
+ field.SetFloat(floatVal)
+ }
+ return err
+}
+
+// Don't pass in pointers to bind to. Can lead to bugs. See:
+// https://github.com/codegangsta/martini-contrib/issues/40
+// https://github.com/codegangsta/martini-contrib/pull/34#issuecomment-29683659
+func ensureNotPointer(obj interface{}) {
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr {
+ panic("Pointers are not accepted as binding models")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/json.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/json.go
new file mode 100644
index 0000000..6e53244
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/json.go
@@ -0,0 +1,25 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "encoding/json"
+
+ "net/http"
+)
+
+type jsonBinding struct{}
+
+func (jsonBinding) Name() string {
+ return "json"
+}
+
+func (jsonBinding) Bind(req *http.Request, obj interface{}) error {
+ decoder := json.NewDecoder(req.Body)
+ if err := decoder.Decode(obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/protobuf.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/protobuf.go
new file mode 100644
index 0000000..d6bef02
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/protobuf.go
@@ -0,0 +1,35 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "github.com/golang/protobuf/proto"
+
+ "io/ioutil"
+ "net/http"
+)
+
+type protobufBinding struct{}
+
+func (_ protobufBinding) Name() string {
+ return "protobuf"
+}
+
+func (_ protobufBinding) Bind(req *http.Request, obj interface{}) error {
+
+ buf, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ return err
+ }
+
+ if err = proto.Unmarshal(buf, obj.(proto.Message)); err != nil {
+ return err
+ }
+
+	// Here it would be the same to return validate(obj), but for now we can't
+	// add `binding:""` tags to the structs automatically generated by protoc
+ return nil
+ //return validate(obj)
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/xml.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/xml.go
new file mode 100644
index 0000000..f84a6b7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/binding/xml.go
@@ -0,0 +1,24 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "encoding/xml"
+ "net/http"
+)
+
+type xmlBinding struct{}
+
+func (xmlBinding) Name() string {
+ return "xml"
+}
+
+func (xmlBinding) Bind(req *http.Request, obj interface{}) error {
+ decoder := xml.NewDecoder(req.Body)
+ if err := decoder.Decode(obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/context.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/context.go
new file mode 100644
index 0000000..2fb69b7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/context.go
@@ -0,0 +1,562 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "errors"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/gin-gonic/gin/binding"
+ "github.com/gin-gonic/gin/render"
+ "github.com/manucorporat/sse"
+ "golang.org/x/net/context"
+)
+
+// Content-Type MIME of the most common data formats
+const (
+ MIMEJSON = binding.MIMEJSON
+ MIMEHTML = binding.MIMEHTML
+ MIMEXML = binding.MIMEXML
+ MIMEXML2 = binding.MIMEXML2
+ MIMEPlain = binding.MIMEPlain
+ MIMEPOSTForm = binding.MIMEPOSTForm
+ MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm
+)
+
+const abortIndex int8 = math.MaxInt8 / 2
+
+// Context is the most important part of gin. It allows us to pass variables between middleware,
+// manage the flow, validate the JSON of a request and render a JSON response for example.
+type Context struct {
+ writermem responseWriter
+ Request *http.Request
+ Writer ResponseWriter
+
+ Params Params
+ handlers HandlersChain
+ index int8
+
+ engine *Engine
+ Keys map[string]interface{}
+ Errors errorMsgs
+ Accepted []string
+}
+
+var _ context.Context = &Context{}
+
+/************************************/
+/********** CONTEXT CREATION ********/
+/************************************/
+
+func (c *Context) reset() {
+ c.Writer = &c.writermem
+ c.Params = c.Params[0:0]
+ c.handlers = nil
+ c.index = -1
+ c.Keys = nil
+ c.Errors = c.Errors[0:0]
+ c.Accepted = nil
+}
+
+// Copy returns a copy of the current context that can be safely used outside the request's scope.
+// This has to be used when the context has to be passed to a goroutine.
+func (c *Context) Copy() *Context {
+ var cp Context = *c
+ cp.writermem.ResponseWriter = nil
+ cp.Writer = &cp.writermem
+ cp.index = abortIndex
+ cp.handlers = nil
+ return &cp
+}
+
+// HandlerName returns the main handle's name. For example if the handler is "handleGetUsers()", this
+// function will return "main.handleGetUsers"
+func (c *Context) HandlerName() string {
+ return nameOfFunction(c.handlers.Last())
+}
+
+/************************************/
+/*********** FLOW CONTROL ***********/
+/************************************/
+
+// Next should be used only inside middleware.
+// It executes the pending handlers in the chain inside the calling handler.
+// See example in github.
+func (c *Context) Next() {
+ c.index++
+ s := int8(len(c.handlers))
+ for ; c.index < s; c.index++ {
+ c.handlers[c.index](c)
+ }
+}
+
+// IsAborted returns true if the current context was aborted.
+func (c *Context) IsAborted() bool {
+ return c.index >= abortIndex
+}
+
+// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
+// Let's say you have an authorization middleware that validates that the current request is authorized. If the
+// authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
+// for this request are not called.
+func (c *Context) Abort() {
+ c.index = abortIndex
+}
+
+// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
+// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
+func (c *Context) AbortWithStatus(code int) {
+ c.Status(code)
+ c.Abort()
+}
+
+// AbortWithError calls `AbortWithStatus()` and `Error()` internally. This method stops the chain, writes the status code and
+// pushes the specified error to `c.Errors`.
+// See Context.Error() for more details.
+func (c *Context) AbortWithError(code int, err error) *Error {
+ c.AbortWithStatus(code)
+ return c.Error(err)
+}
+
+/************************************/
+/********* ERROR MANAGEMENT *********/
+/************************************/
+
+// Attaches an error to the current context. The error is pushed to a list of errors.
+// It's a good idea to call Error for each error that occurred during the resolution of a request.
+// A middleware can be used to collect all the errors
+// and push them to a database together, print a log, or append it in the HTTP response.
+func (c *Context) Error(err error) *Error {
+ var parsedError *Error
+ switch err.(type) {
+ case *Error:
+ parsedError = err.(*Error)
+ default:
+ parsedError = &Error{
+ Err: err,
+ Type: ErrorTypePrivate,
+ }
+ }
+ c.Errors = append(c.Errors, parsedError)
+ return parsedError
+}
+
+/************************************/
+/******** METADATA MANAGEMENT********/
+/************************************/
+
+// Set is used to store a new key/value pair exclusively for this context.
+// It also lazy initializes c.Keys if it was not used previously.
+func (c *Context) Set(key string, value interface{}) {
+ if c.Keys == nil {
+ c.Keys = make(map[string]interface{})
+ }
+ c.Keys[key] = value
+}
+
+// Get returns the value for the given key, ie: (value, true).
+// If the value does not exist it returns (nil, false)
+func (c *Context) Get(key string) (value interface{}, exists bool) {
+ if c.Keys != nil {
+ value, exists = c.Keys[key]
+ }
+ return
+}
+
+// Returns the value for the given key if it exists, otherwise it panics.
+func (c *Context) MustGet(key string) interface{} {
+ if value, exists := c.Get(key); exists {
+ return value
+ }
+ panic("Key \"" + key + "\" does not exist")
+}
+
+/************************************/
+/************ INPUT DATA ************/
+/************************************/
+
+// Param returns the value of the URL param.
+// It is a shortcut for c.Params.ByName(key)
+// router.GET("/user/:id", func(c *gin.Context) {
+// // a GET request to /user/john
+// id := c.Param("id") // id == "john"
+// })
+func (c *Context) Param(key string) string {
+ return c.Params.ByName(key)
+}
+
+// Query returns the keyed url query value if it exists,
+// otherwise it returns an empty string `("")`.
+// It is a shortcut for `c.Request.URL.Query().Get(key)`
+// GET /path?id=1234&name=Manu&value=
+// c.Query("id") == "1234"
+// c.Query("name") == "Manu"
+// c.Query("value") == ""
+// c.Query("wtf") == ""
+func (c *Context) Query(key string) string {
+ value, _ := c.GetQuery(key)
+ return value
+}
+
+// DefaultQuery returns the keyed url query value if it exists,
+// otherwise it returns the specified defaultValue string.
+// See: Query() and GetQuery() for further information.
+// GET /?name=Manu&lastname=
+// c.DefaultQuery("name", "unknown") == "Manu"
+// c.DefaultQuery("id", "none") == "none"
+// c.DefaultQuery("lastname", "none") == ""
+func (c *Context) DefaultQuery(key, defaultValue string) string {
+ if value, ok := c.GetQuery(key); ok {
+ return value
+ }
+ return defaultValue
+}
+
+// GetQuery is like Query(), it returns the keyed url query value
+// if it exists `(value, true)` (even when the value is an empty string),
+// otherwise it returns `("", false)`.
+// It is a shortcut for `c.Request.URL.Query().Get(key)`
+// GET /?name=Manu&lastname=
+// ("Manu", true) == c.GetQuery("name")
+// ("", false) == c.GetQuery("id")
+// ("", true) == c.GetQuery("lastname")
+func (c *Context) GetQuery(key string) (string, bool) {
+ req := c.Request
+ if values, ok := req.URL.Query()[key]; ok && len(values) > 0 {
+ return values[0], true
+ }
+ return "", false
+}
+
+// PostForm returns the specified key from a POST urlencoded form or multipart form
+// when it exists, otherwise it returns an empty string `("")`.
+func (c *Context) PostForm(key string) string {
+ value, _ := c.GetPostForm(key)
+ return value
+}
+
+// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form
+// when it exists, otherwise it returns the specified defaultValue string.
+// See: PostForm() and GetPostForm() for further information.
+func (c *Context) DefaultPostForm(key, defaultValue string) string {
+ if value, ok := c.GetPostForm(key); ok {
+ return value
+ }
+ return defaultValue
+}
+
+// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded
+// form or multipart form when it exists `(value, true)` (even when the value is an empty string),
+// otherwise it returns ("", false).
+// For example, during a PATCH request to update the user's email:
+// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com"
+// email= --> ("", true) := GetPostForm("email") // set email to ""
+// --> ("", false) := GetPostForm("email") // do nothing with email
+func (c *Context) GetPostForm(key string) (string, bool) {
+ req := c.Request
+ req.ParseMultipartForm(32 << 20) // 32 MB
+ if values := req.PostForm[key]; len(values) > 0 {
+ return values[0], true
+ }
+ if req.MultipartForm != nil && req.MultipartForm.File != nil {
+ if values := req.MultipartForm.Value[key]; len(values) > 0 {
+ return values[0], true
+ }
+ }
+ return "", false
+}
+
+// Bind checks the Content-Type to select a binding engine automatically.
+// Depending on the "Content-Type" header, different bindings are used:
+// "application/json" --> JSON binding
+// "application/xml" --> XML binding
+// otherwise --> Form binding (see binding.Default)
+// It parses the request's body and decodes the payload into the struct
+// specified as a pointer, writing a 400 error to the response if the input is not valid.
+func (c *Context) Bind(obj interface{}) error {
+ b := binding.Default(c.Request.Method, c.ContentType())
+ return c.BindWith(obj, b)
+}
+
+// BindJSON is a shortcut for c.BindWith(obj, binding.JSON)
+func (c *Context) BindJSON(obj interface{}) error {
+ return c.BindWith(obj, binding.JSON)
+}
+
+// BindWith binds the passed struct pointer using the specified binding engine.
+// See the binding package.
+func (c *Context) BindWith(obj interface{}, b binding.Binding) error {
+ if err := b.Bind(c.Request, obj); err != nil {
+ c.AbortWithError(400, err).SetType(ErrorTypeBind)
+ return err
+ }
+ return nil
+}
+
+// ClientIP implements a best-effort algorithm to return the real client IP. It parses
+// X-Real-IP and X-Forwarded-For in order to work properly with reverse proxies such as nginx or haproxy.
+func (c *Context) ClientIP() string {
+ if c.engine.ForwardedByClientIP {
+ clientIP := strings.TrimSpace(c.requestHeader("X-Real-Ip"))
+ if len(clientIP) > 0 {
+ return clientIP
+ }
+ clientIP = c.requestHeader("X-Forwarded-For")
+ if index := strings.IndexByte(clientIP, ','); index >= 0 {
+ clientIP = clientIP[0:index]
+ }
+ clientIP = strings.TrimSpace(clientIP)
+ if len(clientIP) > 0 {
+ return clientIP
+ }
+ }
+ if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil {
+ return ip
+ }
+ return ""
+}
+
+// ContentType returns the Content-Type header of the request.
+func (c *Context) ContentType() string {
+ return filterFlags(c.requestHeader("Content-Type"))
+}
+
+func (c *Context) requestHeader(key string) string {
+ if values, _ := c.Request.Header[key]; len(values) > 0 {
+ return values[0]
+ }
+ return ""
+}
+
+/************************************/
+/******** RESPONSE RENDERING ********/
+/************************************/
+
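+// Status sets the HTTP response code.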
+func (c *Context) Status(code int) {
+ c.writermem.WriteHeader(code)
+}
+
+// Header is an intelligent shortcut for c.Writer.Header().Set(key, value)
+// It writes a header in the response.
+// If value == "", this method removes the header `c.Writer.Header().Del(key)`
+func (c *Context) Header(key, value string) {
+ if len(value) == 0 {
+ c.Writer.Header().Del(key)
+ } else {
+ c.Writer.Header().Set(key, value)
+ }
+}
+
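+// SetCookie adds a Set-Cookie header to the response; an empty path defaults
+// to "/". A hypothetical call
+//     c.SetCookie("session", "abc123", 3600, "/", "", false, true)
+// stores an HttpOnly cookie named "session" for one hour.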
+func (c *Context) SetCookie(
+ name string,
+ value string,
+ maxAge int,
+ path string,
+ domain string,
+ secure bool,
+ httpOnly bool,
+) {
+ if path == "" {
+ path = "/"
+ }
+ http.SetCookie(c.Writer, &http.Cookie{
+ Name: name,
+ Value: url.QueryEscape(value),
+ MaxAge: maxAge,
+ Path: path,
+ Domain: domain,
+ Secure: secure,
+ HttpOnly: httpOnly,
+ })
+}
+
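+// Cookie returns the unescaped value of the named request cookie, or an error
+// when no such cookie is present.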
+func (c *Context) Cookie(name string) (string, error) {
+ cookie, err := c.Request.Cookie(name)
+ if err != nil {
+ return "", err
+ }
+ val, _ := url.QueryUnescape(cookie.Value)
+ return val, nil
+}
+
+func (c *Context) Render(code int, r render.Render) {
+ c.Status(code)
+ if err := r.Render(c.Writer); err != nil {
+ panic(err)
+ }
+}
+
+// HTML renders the HTTP template specified by its file name.
+// It also updates the HTTP code and sets the Content-Type as "text/html".
+// See http://golang.org/doc/articles/wiki/
+func (c *Context) HTML(code int, name string, obj interface{}) {
+ instance := c.engine.HTMLRender.Instance(name, obj)
+ c.Render(code, instance)
+}
+
+// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body.
+// It also sets the Content-Type as "application/json".
+// WARNING: we recommend using this only for development purposes since printing pretty JSON
+// consumes more CPU and bandwidth. Use Context.JSON() instead.
+func (c *Context) IndentedJSON(code int, obj interface{}) {
+ c.Render(code, render.IndentedJSON{Data: obj})
+}
+
+// JSON serializes the given struct as JSON into the response body.
+// It also sets the Content-Type as "application/json".
+func (c *Context) JSON(code int, obj interface{}) {
+ c.Status(code)
+ if err := render.WriteJSON(c.Writer, obj); err != nil {
+ panic(err)
+ }
+}
+
+// XML serializes the given struct as XML into the response body.
+// It also sets the Content-Type as "application/xml".
+func (c *Context) XML(code int, obj interface{}) {
+ c.Render(code, render.XML{Data: obj})
+}
+
+// String writes the given string into the response body.
+func (c *Context) String(code int, format string, values ...interface{}) {
+ c.Status(code)
+ render.WriteString(c.Writer, format, values)
+}
+
+// Redirect returns an HTTP redirect to the specific location.
+func (c *Context) Redirect(code int, location string) {
+ c.Render(-1, render.Redirect{
+ Code: code,
+ Location: location,
+ Request: c.Request,
+ })
+}
+
+// Data writes some data into the body stream and updates the HTTP code.
+func (c *Context) Data(code int, contentType string, data []byte) {
+ c.Render(code, render.Data{
+ ContentType: contentType,
+ Data: data,
+ })
+}
+
+// File writes the specified file into the body stream in an efficient way.
+func (c *Context) File(filepath string) {
+ http.ServeFile(c.Writer, c.Request, filepath)
+}
+
+// SSEvent writes a Server-Sent Event into the body stream.
+func (c *Context) SSEvent(name string, message interface{}) {
+ c.Render(-1, sse.Event{
+ Event: name,
+ Data: message,
+ })
+}
+
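+// Stream sends a streaming response, calling step repeatedly until it returns
+// false or the client disconnects. A hypothetical ticker:
+//     c.Stream(func(w io.Writer) bool {
+//         fmt.Fprintln(w, "tick")
+//         return true // keep streaming
+//     })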
+func (c *Context) Stream(step func(w io.Writer) bool) {
+ w := c.Writer
+ clientGone := w.CloseNotify()
+ for {
+ select {
+ case <-clientGone:
+ return
+ default:
+ keepOpen := step(w)
+ w.Flush()
+ if !keepOpen {
+ return
+ }
+ }
+ }
+}
+
+/************************************/
+/******** CONTENT NEGOTIATION *******/
+/************************************/
+
+type Negotiate struct {
+ Offered []string
+ HTMLName string
+ HTMLData interface{}
+ JSONData interface{}
+ XMLData interface{}
+ Data interface{}
+}
+
+func (c *Context) Negotiate(code int, config Negotiate) {
+ switch c.NegotiateFormat(config.Offered...) {
+ case binding.MIMEJSON:
+ data := chooseData(config.JSONData, config.Data)
+ c.JSON(code, data)
+
+ case binding.MIMEHTML:
+ data := chooseData(config.HTMLData, config.Data)
+ c.HTML(code, config.HTMLName, data)
+
+ case binding.MIMEXML:
+ data := chooseData(config.XMLData, config.Data)
+ c.XML(code, data)
+
+ default:
+ c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server"))
+ }
+}
+
+func (c *Context) NegotiateFormat(offered ...string) string {
+ assert1(len(offered) > 0, "you must provide at least one offer")
+
+ if c.Accepted == nil {
+ c.Accepted = parseAccept(c.requestHeader("Accept"))
+ }
+ if len(c.Accepted) == 0 {
+ return offered[0]
+ }
+ for _, accepted := range c.Accepted {
+ for _, offert := range offered {
+ if accepted == offert {
+ return offert
+ }
+ }
+ }
+ return ""
+}
+
+func (c *Context) SetAccepted(formats ...string) {
+ c.Accepted = formats
+}
+
+/************************************/
+/***** GOLANG.ORG/X/NET/CONTEXT *****/
+/************************************/
+
+func (c *Context) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (c *Context) Done() <-chan struct{} {
+ return nil
+}
+
+func (c *Context) Err() error {
+ return nil
+}
+
+func (c *Context) Value(key interface{}) interface{} {
+ if key == 0 {
+ return c.Request
+ }
+ if keyAsString, ok := key.(string); ok {
+ val, _ := c.Get(keyAsString)
+ return val
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/debug.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/debug.go
new file mode 100644
index 0000000..a121591
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/debug.go
@@ -0,0 +1,71 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bytes"
+ "html/template"
+ "log"
+)
+
+func init() {
+ log.SetFlags(0)
+}
+
+// IsDebugging returns true if the framework is running in debug mode.
+// Use SetMode(gin.ReleaseMode) to disable debug mode.
+func IsDebugging() bool {
+ return ginMode == debugCode
+}
+
+func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) {
+ if IsDebugging() {
+ nuHandlers := len(handlers)
+ handlerName := nameOfFunction(handlers.Last())
+ debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers)
+ }
+}
+
+func debugPrintLoadTemplate(tmpl *template.Template) {
+ if IsDebugging() {
+ var buf bytes.Buffer
+ for _, tmpl := range tmpl.Templates() {
+ buf.WriteString("\t- ")
+ buf.WriteString(tmpl.Name())
+ buf.WriteString("\n")
+ }
+ debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String())
+ }
+}
+
+func debugPrint(format string, values ...interface{}) {
+ if IsDebugging() {
+ log.Printf("[GIN-debug] "+format, values...)
+ }
+}
+
+func debugPrintWARNINGNew() {
+ debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production.
+ - using env: export GIN_MODE=release
+ - using code: gin.SetMode(gin.ReleaseMode)
+
+`)
+}
+
+func debugPrintWARNINGSetHTMLTemplate() {
+	debugPrint(`[WARNING] SetHTMLTemplate() is NOT thread-safe. It should only be called
+at initialization, ie. before any route is registered or the router is listening on a socket:
+
+ router := gin.Default()
+ router.SetHTMLTemplate(template) // << good place
+
+`)
+}
+
+func debugPrintError(err error) {
+ if err != nil {
+ debugPrint("[ERROR] %v\n", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/deprecated.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/deprecated.go
new file mode 100644
index 0000000..0488a9b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/deprecated.go
@@ -0,0 +1,12 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import "log"
+
+func (c *Context) GetCookie(name string) (string, error) {
+ log.Println("GetCookie() method is deprecated. Use Cookie() instead.")
+ return c.Cookie(name)
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/errors.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/errors.go
new file mode 100644
index 0000000..bced19a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/errors.go
@@ -0,0 +1,159 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+type ErrorType uint64
+
+const (
+ ErrorTypeBind ErrorType = 1 << 63 // used when c.Bind() fails
+ ErrorTypeRender ErrorType = 1 << 62 // used when c.Render() fails
+ ErrorTypePrivate ErrorType = 1 << 0
+ ErrorTypePublic ErrorType = 1 << 1
+
+ ErrorTypeAny ErrorType = 1<<64 - 1
+ ErrorTypeNu = 2
+)
+
+type (
+ Error struct {
+ Err error
+ Type ErrorType
+ Meta interface{}
+ }
+
+ errorMsgs []*Error
+)
+
+var _ error = &Error{}
+
+func (msg *Error) SetType(flags ErrorType) *Error {
+ msg.Type = flags
+ return msg
+}
+
+func (msg *Error) SetMeta(data interface{}) *Error {
+ msg.Meta = data
+ return msg
+}
+
+func (msg *Error) JSON() interface{} {
+ json := H{}
+ if msg.Meta != nil {
+ value := reflect.ValueOf(msg.Meta)
+ switch value.Kind() {
+ case reflect.Struct:
+ return msg.Meta
+ case reflect.Map:
+ for _, key := range value.MapKeys() {
+ json[key.String()] = value.MapIndex(key).Interface()
+ }
+ default:
+ json["meta"] = msg.Meta
+ }
+ }
+ if _, ok := json["error"]; !ok {
+ json["error"] = msg.Error()
+ }
+ return json
+}
+
+// Implements the json.Marshaler interface
+func (msg *Error) MarshalJSON() ([]byte, error) {
+ return json.Marshal(msg.JSON())
+}
+
+// Implements the error interface
+func (msg *Error) Error() string {
+ return msg.Err.Error()
+}
+
+func (msg *Error) IsType(flags ErrorType) bool {
+ return (msg.Type & flags) > 0
+}
+
+// ByType returns a readonly copy filtered by the given error type.
+// ie. ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic
+func (a errorMsgs) ByType(typ ErrorType) errorMsgs {
+ if len(a) == 0 {
+ return nil
+ }
+ if typ == ErrorTypeAny {
+ return a
+ }
+ var result errorMsgs = nil
+ for _, msg := range a {
+ if msg.IsType(typ) {
+ result = append(result, msg)
+ }
+ }
+ return result
+}
+
+// Returns the last error in the slice. It returns nil if the array is empty.
+// Shortcut for errors[len(errors)-1]
+func (a errorMsgs) Last() *Error {
+ length := len(a)
+ if length > 0 {
+ return a[length-1]
+ }
+ return nil
+}
+
+// Returns an array with all the error messages.
+// Example:
+// c.Error(errors.New("first"))
+// c.Error(errors.New("second"))
+// c.Error(errors.New("third"))
+// c.Errors.Errors() // == []string{"first", "second", "third"}
+func (a errorMsgs) Errors() []string {
+ if len(a) == 0 {
+ return nil
+ }
+ errorStrings := make([]string, len(a))
+ for i, err := range a {
+ errorStrings[i] = err.Error()
+ }
+ return errorStrings
+}
+
+func (a errorMsgs) JSON() interface{} {
+ switch len(a) {
+ case 0:
+ return nil
+ case 1:
+ return a.Last().JSON()
+ default:
+ json := make([]interface{}, len(a))
+ for i, err := range a {
+ json[i] = err.JSON()
+ }
+ return json
+ }
+}
+
+func (a errorMsgs) MarshalJSON() ([]byte, error) {
+ return json.Marshal(a.JSON())
+}
+
+func (a errorMsgs) String() string {
+ if len(a) == 0 {
+ return ""
+ }
+ var buffer bytes.Buffer
+ for i, msg := range a {
+ fmt.Fprintf(&buffer, "Error #%02d: %s\n", (i + 1), msg.Err)
+ if msg.Meta != nil {
+ fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta)
+ }
+ }
+ return buffer.String()
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/fs.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/fs.go
new file mode 100644
index 0000000..6af3ded
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/fs.go
@@ -0,0 +1,42 @@
+package gin
+
+import (
+ "net/http"
+ "os"
+)
+
+type (
+ onlyfilesFS struct {
+ fs http.FileSystem
+ }
+ neuteredReaddirFile struct {
+ http.File
+ }
+)
+
+// Dir returns an http.FileSystem that can be used by http.FileServer(). It is used internally
+// in router.Static().
+// if listDirectory == true, then it works the same as http.Dir(), otherwise it returns
+// a filesystem that prevents http.FileServer() from listing the directory files.
+func Dir(root string, listDirectory bool) http.FileSystem {
+ fs := http.Dir(root)
+ if listDirectory {
+ return fs
+ }
+ return &onlyfilesFS{fs}
+}
+
+// Conforms to http.FileSystem
+func (fs onlyfilesFS) Open(name string) (http.File, error) {
+ f, err := fs.fs.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return neuteredReaddirFile{f}, nil
+}
+
+// Overrides the http.File default implementation
+func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) {
+ // this disables directory listing
+ return nil, nil
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/gin.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/gin.go
new file mode 100644
index 0000000..fb1df9c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/gin.go
@@ -0,0 +1,370 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "html/template"
+ "net"
+ "net/http"
+ "os"
+ "sync"
+
+ "github.com/gin-gonic/gin/render"
+)
+
+// Framework's version
+const Version = "v1.0rc2"
+
+var default404Body = []byte("404 page not found")
+var default405Body = []byte("405 method not allowed")
+
+type HandlerFunc func(*Context)
+type HandlersChain []HandlerFunc
+
+// Last returns the last handler in the chain. ie. the last handler is the main one.
+func (c HandlersChain) Last() HandlerFunc {
+ length := len(c)
+ if length > 0 {
+ return c[length-1]
+ }
+ return nil
+}
+
+type (
+ RoutesInfo []RouteInfo
+ RouteInfo struct {
+ Method string
+ Path string
+ Handler string
+ }
+
+ // Engine is the framework's instance, it contains the muxer, middleware and configuration settings.
+ // Create an instance of Engine, by using New() or Default()
+ Engine struct {
+ RouterGroup
+ HTMLRender render.HTMLRender
+ allNoRoute HandlersChain
+ allNoMethod HandlersChain
+ noRoute HandlersChain
+ noMethod HandlersChain
+ pool sync.Pool
+ trees methodTrees
+
+ // Enables automatic redirection if the current route can't be matched but a
+ // handler for the path with (without) the trailing slash exists.
+ // For example if /foo/ is requested but a route only exists for /foo, the
+ // client is redirected to /foo with http status code 301 for GET requests
+ // and 307 for all other request methods.
+ RedirectTrailingSlash bool
+
+ // If enabled, the router tries to fix the current request path, if no
+ // handle is registered for it.
+ // First superfluous path elements like ../ or // are removed.
+ // Afterwards the router does a case-insensitive lookup of the cleaned path.
+ // If a handle can be found for this route, the router makes a redirection
+ // to the corrected path with status code 301 for GET requests and 307 for
+ // all other request methods.
+ // For example /FOO and /..//Foo could be redirected to /foo.
+ // RedirectTrailingSlash is independent of this option.
+ RedirectFixedPath bool
+
+ // If enabled, the router checks if another method is allowed for the
+ // current route, if the current request can not be routed.
+ // If this is the case, the request is answered with 'Method Not Allowed'
+ // and HTTP status code 405.
+ // If no other Method is allowed, the request is delegated to the NotFound
+ // handler.
+ HandleMethodNotAllowed bool
+ ForwardedByClientIP bool
+ }
+)
+
+var _ IRouter = &Engine{}
+
+// New returns a new blank Engine instance without any middleware attached.
+// By default the configuration is:
+// - RedirectTrailingSlash: true
+// - RedirectFixedPath: false
+// - HandleMethodNotAllowed: false
+// - ForwardedByClientIP: true
+func New() *Engine {
+ debugPrintWARNINGNew()
+ engine := &Engine{
+ RouterGroup: RouterGroup{
+ Handlers: nil,
+ basePath: "/",
+ root: true,
+ },
+ RedirectTrailingSlash: true,
+ RedirectFixedPath: false,
+ HandleMethodNotAllowed: false,
+ ForwardedByClientIP: true,
+ trees: make(methodTrees, 0, 9),
+ }
+ engine.RouterGroup.engine = engine
+ engine.pool.New = func() interface{} {
+ return engine.allocateContext()
+ }
+ return engine
+}
+
+// Default returns an Engine instance with the Logger and Recovery middleware already attached.
+func Default() *Engine {
+ engine := New()
+ engine.Use(Logger(), Recovery())
+ return engine
+}
+
+func (engine *Engine) allocateContext() *Context {
+ return &Context{engine: engine}
+}
+
+func (engine *Engine) LoadHTMLGlob(pattern string) {
+ if IsDebugging() {
+ debugPrintLoadTemplate(template.Must(template.ParseGlob(pattern)))
+ engine.HTMLRender = render.HTMLDebug{Glob: pattern}
+ } else {
+ templ := template.Must(template.ParseGlob(pattern))
+ engine.SetHTMLTemplate(templ)
+ }
+}
+
+func (engine *Engine) LoadHTMLFiles(files ...string) {
+ if IsDebugging() {
+ engine.HTMLRender = render.HTMLDebug{Files: files}
+ } else {
+ templ := template.Must(template.ParseFiles(files...))
+ engine.SetHTMLTemplate(templ)
+ }
+}
+
+func (engine *Engine) SetHTMLTemplate(templ *template.Template) {
+ if len(engine.trees) > 0 {
+ debugPrintWARNINGSetHTMLTemplate()
+ }
+ engine.HTMLRender = render.HTMLProduction{Template: templ}
+}
+
+// NoRoute adds handlers for NoRoute. It returns a 404 code by default.
+func (engine *Engine) NoRoute(handlers ...HandlerFunc) {
+ engine.noRoute = handlers
+ engine.rebuild404Handlers()
+}
+
+// NoMethod sets the handlers called when the request method is not allowed for the matched route. It returns a 405 code by default.
+func (engine *Engine) NoMethod(handlers ...HandlerFunc) {
+ engine.noMethod = handlers
+ engine.rebuild405Handlers()
+}
+
+// Attaches a global middleware to the router. ie. the middleware attached through Use() will be
+// included in the handlers chain for every single request. Even 404, 405, static files...
+// For example, this is the right place for a logger or error management middleware.
+func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes {
+ engine.RouterGroup.Use(middleware...)
+ engine.rebuild404Handlers()
+ engine.rebuild405Handlers()
+ return engine
+}
+
+func (engine *Engine) rebuild404Handlers() {
+ engine.allNoRoute = engine.combineHandlers(engine.noRoute)
+}
+
+func (engine *Engine) rebuild405Handlers() {
+ engine.allNoMethod = engine.combineHandlers(engine.noMethod)
+}
+
+func (engine *Engine) addRoute(method, path string, handlers HandlersChain) {
+ assert1(path[0] == '/', "path must begin with '/'")
+ assert1(len(method) > 0, "HTTP method can not be empty")
+ assert1(len(handlers) > 0, "there must be at least one handler")
+
+ debugPrintRoute(method, path, handlers)
+ root := engine.trees.get(method)
+ if root == nil {
+ root = new(node)
+ engine.trees = append(engine.trees, methodTree{method: method, root: root})
+ }
+ root.addRoute(path, handlers)
+}
+
+// Routes returns a slice of registered routes, including some useful information, such as:
+// the http method, path and the handler name.
+func (engine *Engine) Routes() (routes RoutesInfo) {
+ for _, tree := range engine.trees {
+ routes = iterate("", tree.method, routes, tree.root)
+ }
+ return routes
+}
+
+func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo {
+ path += root.path
+ if len(root.handlers) > 0 {
+ routes = append(routes, RouteInfo{
+ Method: method,
+ Path: path,
+ Handler: nameOfFunction(root.handlers.Last()),
+ })
+ }
+ for _, child := range root.children {
+ routes = iterate(path, method, routes, child)
+ }
+ return routes
+}
+
+// Run attaches the router to a http.Server and starts listening and serving HTTP requests.
+// It is a shortcut for http.ListenAndServe(addr, router)
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) Run(addr ...string) (err error) {
+ defer func() { debugPrintError(err) }()
+
+ address := resolveAddress(addr)
+ debugPrint("Listening and serving HTTP on %s\n", address)
+ err = http.ListenAndServe(address, engine)
+ return
+}
+
+// RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests.
+// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router)
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) RunTLS(addr string, certFile string, keyFile string) (err error) {
+ debugPrint("Listening and serving HTTPS on %s\n", addr)
+ defer func() { debugPrintError(err) }()
+
+ err = http.ListenAndServeTLS(addr, certFile, keyFile, engine)
+ return
+}
+
+// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests
+// through the specified unix socket (ie. a file).
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) RunUnix(file string) (err error) {
+ debugPrint("Listening and serving HTTP on unix:/%s", file)
+ defer func() { debugPrintError(err) }()
+
+ os.Remove(file)
+ listener, err := net.Listen("unix", file)
+ if err != nil {
+ return
+ }
+ defer listener.Close()
+ err = http.Serve(listener, engine)
+ return
+}
+
+// Conforms to the http.Handler interface.
+func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ c := engine.pool.Get().(*Context)
+ c.writermem.reset(w)
+ c.Request = req
+ c.reset()
+
+ engine.handleHTTPRequest(c)
+
+ engine.pool.Put(c)
+}
+
+func (engine *Engine) handleHTTPRequest(context *Context) {
+ httpMethod := context.Request.Method
+ path := context.Request.URL.Path
+
+ // Find root of the tree for the given HTTP method
+ t := engine.trees
+ for i, tl := 0, len(t); i < tl; i++ {
+ if t[i].method == httpMethod {
+ root := t[i].root
+ // Find route in tree
+ handlers, params, tsr := root.getValue(path, context.Params)
+ if handlers != nil {
+ context.handlers = handlers
+ context.Params = params
+ context.Next()
+ context.writermem.WriteHeaderNow()
+ return
+
+ } else if httpMethod != "CONNECT" && path != "/" {
+ if tsr && engine.RedirectTrailingSlash {
+ redirectTrailingSlash(context)
+ return
+ }
+ if engine.RedirectFixedPath && redirectFixedPath(context, root, engine.RedirectFixedPath) {
+ return
+ }
+ }
+ break
+ }
+ }
+
+ // TODO: unit test
+ if engine.HandleMethodNotAllowed {
+ for _, tree := range engine.trees {
+ if tree.method != httpMethod {
+ if handlers, _, _ := tree.root.getValue(path, nil); handlers != nil {
+ context.handlers = engine.allNoMethod
+ serveError(context, 405, default405Body)
+ return
+ }
+ }
+ }
+ }
+ context.handlers = engine.allNoRoute
+ serveError(context, 404, default404Body)
+}
+
+var mimePlain = []string{MIMEPlain}
+
+func serveError(c *Context, code int, defaultMessage []byte) {
+ c.writermem.status = code
+ c.Next()
+ if !c.writermem.Written() {
+ if c.writermem.Status() == code {
+ c.writermem.Header()["Content-Type"] = mimePlain
+ c.Writer.Write(defaultMessage)
+ } else {
+ c.writermem.WriteHeaderNow()
+ }
+ }
+}
+
+func redirectTrailingSlash(c *Context) {
+ req := c.Request
+ path := req.URL.Path
+ code := 301 // Permanent redirect, request with GET method
+ if req.Method != "GET" {
+ code = 307
+ }
+
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ req.URL.Path = path[:len(path)-1]
+ } else {
+ req.URL.Path = path + "/"
+ }
+ debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String())
+ http.Redirect(c.Writer, req, req.URL.String(), code)
+ c.writermem.WriteHeaderNow()
+}
+
+func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool {
+ req := c.Request
+ path := req.URL.Path
+
+ fixedPath, found := root.findCaseInsensitivePath(
+ cleanPath(path),
+ trailingSlash,
+ )
+ if found {
+ code := 301 // Permanent redirect, request with GET method
+ if req.Method != "GET" {
+ code = 307
+ }
+ req.URL.Path = string(fixedPath)
+ debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String())
+ http.Redirect(c.Writer, req, req.URL.String(), code)
+ c.writermem.WriteHeaderNow()
+ return true
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/logger.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/logger.go
new file mode 100644
index 0000000..c5d4c3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "fmt"
+ "io"
+ "time"
+)
+
+var (
+ green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
+ white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
+ yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
+ red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
+ blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
+ magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
+ cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
+ reset = string([]byte{27, 91, 48, 109})
+)
+
+func ErrorLogger() HandlerFunc {
+ return ErrorLoggerT(ErrorTypeAny)
+}
+
+func ErrorLoggerT(typ ErrorType) HandlerFunc {
+ return func(c *Context) {
+ c.Next()
+		// avoid writing if we already wrote into the response body
+ if !c.Writer.Written() {
+ errors := c.Errors.ByType(typ)
+ if len(errors) > 0 {
+ c.JSON(-1, errors)
+ }
+ }
+ }
+}
+
+// Instantiates a Logger middleware that will write the logs to gin.DefaultWriter.
+// By default gin.DefaultWriter = os.Stdout
+func Logger() HandlerFunc {
+ return LoggerWithWriter(DefaultWriter)
+}
+
+// Instantiates a Logger middleware with the specified writer buffer.
+// Example: os.Stdout, a file opened in write mode, a socket...
+func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {
+ var skip map[string]struct{}
+
+ if length := len(notlogged); length > 0 {
+ skip = make(map[string]struct{}, length)
+
+ for _, path := range notlogged {
+ skip[path] = struct{}{}
+ }
+ }
+
+ return func(c *Context) {
+ // Start timer
+ start := time.Now()
+ path := c.Request.URL.Path
+
+ // Process request
+ c.Next()
+
+ // Log only when path is not being skipped
+ if _, ok := skip[path]; !ok {
+ // Stop timer
+ end := time.Now()
+ latency := end.Sub(start)
+
+ clientIP := c.ClientIP()
+ method := c.Request.Method
+ statusCode := c.Writer.Status()
+ statusColor := colorForStatus(statusCode)
+ methodColor := colorForMethod(method)
+ comment := c.Errors.ByType(ErrorTypePrivate).String()
+
+ fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %s |%s %s %-7s %s\n%s",
+ end.Format("2006/01/02 - 15:04:05"),
+ statusColor, statusCode, reset,
+ latency,
+ clientIP,
+ methodColor, reset, method,
+ path,
+ comment,
+ )
+ }
+ }
+}
+
+func colorForStatus(code int) string {
+ switch {
+ case code >= 200 && code < 300:
+ return green
+ case code >= 300 && code < 400:
+ return white
+ case code >= 400 && code < 500:
+ return yellow
+ default:
+ return red
+ }
+}
+
+func colorForMethod(method string) string {
+ switch method {
+ case "GET":
+ return blue
+ case "POST":
+ return cyan
+ case "PUT":
+ return yellow
+ case "DELETE":
+ return red
+ case "PATCH":
+ return green
+ case "HEAD":
+ return magenta
+ case "OPTIONS":
+ return white
+ default:
+ return reset
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/logo.jpg b/Godeps/_workspace/src/github.com/gin-gonic/gin/logo.jpg
new file mode 100644
index 0000000..bb51852
Binary files /dev/null and b/Godeps/_workspace/src/github.com/gin-gonic/gin/logo.jpg differ
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/mode.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/mode.go
new file mode 100644
index 0000000..bf9e995
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/mode.go
@@ -0,0 +1,69 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "io"
+ "os"
+
+ "github.com/gin-gonic/gin/binding"
+)
+
+const ENV_GIN_MODE = "GIN_MODE"
+
+const (
+ DebugMode string = "debug"
+ ReleaseMode string = "release"
+ TestMode string = "test"
+)
+const (
+ debugCode = iota
+ releaseCode = iota
+ testCode = iota
+)
+
+// DefaultWriter is the default io.Writer used by Gin for debug output and
+// middleware output like Logger() or Recovery().
+// Note that both Logger and Recovery provide custom ways to configure their
+// output io.Writer.
+// To support coloring in Windows use:
+// import "github.com/mattn/go-colorable"
+// gin.DefaultWriter = colorable.NewColorableStdout()
+var DefaultWriter io.Writer = os.Stdout
+var DefaultErrorWriter io.Writer = os.Stderr
+
+var ginMode int = debugCode
+var modeName string = DebugMode
+
+func init() {
+ mode := os.Getenv(ENV_GIN_MODE)
+ if len(mode) == 0 {
+ SetMode(DebugMode)
+ } else {
+ SetMode(mode)
+ }
+}
+
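+// SetMode switches gin between debug, release and test modes, e.g.
+//     gin.SetMode(gin.ReleaseMode)
+// It panics on any other value.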
+func SetMode(value string) {
+ switch value {
+ case DebugMode:
+ ginMode = debugCode
+ case ReleaseMode:
+ ginMode = releaseCode
+ case TestMode:
+ ginMode = testCode
+ default:
+ panic("gin mode unknown: " + value)
+ }
+ modeName = value
+}
+
+func DisableBindValidation() {
+ binding.Validator = nil
+}
+
+func Mode() string {
+ return modeName
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/path.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/path.go
new file mode 100644
index 0000000..43cdd04
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/path.go
@@ -0,0 +1,123 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Based on the path package, Copyright 2009 The Go Authors.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package gin
+
+// CleanPath is the URL version of path.Clean, it returns a canonical URL path
+// for p, eliminating . and .. elements.
+//
+// The following rules are applied iteratively until no further processing can
+// be done:
+// 1. Replace multiple slashes with a single slash.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path.
+//
+// If the result of this process is an empty string, "/" is returned
+func cleanPath(p string) string {
+ // Turn empty string into "/"
+ if p == "" {
+ return "/"
+ }
+
+ n := len(p)
+ var buf []byte
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+
+ // path must start with '/'
+ r := 1
+ w := 1
+
+ if p[0] != '/' {
+ r = 0
+ buf = make([]byte, n+1)
+ buf[0] = '/'
+ }
+
+ trailing := n > 2 && p[n-1] == '/'
+
+ // A bit more clunky without a 'lazybuf' like the path package, but the loop
+ // gets completely inlined (bufApp). So in contrast to the path package this
+ // loop has no expensive function calls (except 1x make)
+
+ for r < n {
+ switch {
+ case p[r] == '/':
+ // empty path element, trailing slash is added after the end
+ r++
+
+ case p[r] == '.' && r+1 == n:
+ trailing = true
+ r++
+
+ case p[r] == '.' && p[r+1] == '/':
+ // . element
+ r++
+
+ case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
+ // .. element: remove to last /
+ r += 2
+
+ if w > 1 {
+ // can backtrack
+ w--
+
+ if buf == nil {
+ for w > 1 && p[w] != '/' {
+ w--
+ }
+ } else {
+ for w > 1 && buf[w] != '/' {
+ w--
+ }
+ }
+ }
+
+ default:
+ // real path element.
+ // add slash if needed
+ if w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ // copy element
+ for r < n && p[r] != '/' {
+ bufApp(&buf, p, w, p[r])
+ w++
+ r++
+ }
+ }
+ }
+
+ // re-append trailing slash
+ if trailing && w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ if buf == nil {
+ return p[:w]
+ }
+ return string(buf[:w])
+}
+
+// internal helper to lazily create a buffer if necessary
+func bufApp(buf *[]byte, s string, w int, c byte) {
+ if *buf == nil {
+ if s[w] == c {
+ return
+ }
+
+ *buf = make([]byte, len(s))
+ copy(*buf, s[:w])
+ }
+ (*buf)[w] = c
+}
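
Since cleanPath is unexported, its rules are easiest to see from an in-package test sketch; the cases below follow the four rules documented above (the test name is hypothetical):

package gin

import "testing"

func TestCleanPathSketch(t *testing.T) {
	cases := map[string]string{
		"":          "/",     // empty path becomes root
		"//a//b/":   "/a/b/", // rule 1: collapse slashes, keep trailing slash
		"/a/./b":    "/a/b",  // rule 2: drop . elements
		"/a/b/../c": "/a/c",  // rule 3: .. removes the preceding element
		"/../a":     "/a",    // rule 4: rooted .. is dropped
	}
	for in, want := range cases {
		if got := cleanPath(in); got != want {
			t.Errorf("cleanPath(%q) = %q, want %q", in, got, want)
		}
	}
}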
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/recovery.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/recovery.go
new file mode 100644
index 0000000..c502f35
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/recovery.go
@@ -0,0 +1,108 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http/httputil"
+ "runtime"
+)
+
+var (
+ dunno = []byte("???")
+ centerDot = []byte("·")
+ dot = []byte(".")
+ slash = []byte("/")
+)
+
+// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
+func Recovery() HandlerFunc {
+ return RecoveryWithWriter(DefaultErrorWriter)
+}
+
+func RecoveryWithWriter(out io.Writer) HandlerFunc {
+ var logger *log.Logger
+ if out != nil {
+ logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags)
+ }
+ return func(c *Context) {
+ defer func() {
+ if err := recover(); err != nil {
+ if logger != nil {
+ stack := stack(3)
+ httprequest, _ := httputil.DumpRequest(c.Request, false)
+ logger.Printf("[Recovery] panic recovered:\n%s\n%s\n%s%s", string(httprequest), err, stack, reset)
+ }
+ c.AbortWithStatus(500)
+ }
+ }()
+ c.Next()
+ }
+}
+
+// stack returns a nicely formatted stack frame, skipping skip frames
+func stack(skip int) []byte {
+ buf := new(bytes.Buffer) // the returned data
+ // As we loop, we open files and read them. These variables record the currently
+ // loaded file.
+ var lines [][]byte
+ var lastFile string
+ for i := skip; ; i++ { // Skip the expected number of frames
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ // Print this much at least. If we can't find the source, it won't show.
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+ if file != lastFile {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'})
+ lastFile = file
+ }
+ fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+ }
+ return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ if n < 0 || n >= len(lines) {
+ return dunno
+ }
+ return bytes.TrimSpace(lines[n])
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return dunno
+ }
+ name := []byte(fn.Name())
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+ // Also the package path might contain dots (e.g. code.google.com/...),
+ // so first eliminate the path prefix
+ if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
+ name = name[lastslash+1:]
+ }
+ if period := bytes.Index(name, dot); period >= 0 {
+ name = name[period+1:]
+ }
+ name = bytes.Replace(name, centerDot, dot, -1)
+ return name
+}
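
A minimal sketch of wiring Recovery into an engine, so a panicking handler logs its stack trace and answers 500 instead of killing the process (the /boom route is illustrative):

package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.New()
	r.Use(gin.Recovery()) // logs to DefaultErrorWriter
	r.GET("/boom", func(c *gin.Context) {
		panic("something went wrong") // recovered; the client receives a 500
	})
	r.Run(":8080")
}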
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/render/data.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/data.go
new file mode 100644
index 0000000..efa75d5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/data.go
@@ -0,0 +1,20 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import "net/http"
+
+type Data struct {
+ ContentType string
+ Data []byte
+}
+
+func (r Data) Render(w http.ResponseWriter) error {
+ if len(r.ContentType) > 0 {
+ w.Header()["Content-Type"] = []string{r.ContentType}
+ }
+ w.Write(r.Data)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/render/html.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/html.go
new file mode 100644
index 0000000..01f6bf2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/html.go
@@ -0,0 +1,67 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "html/template"
+ "net/http"
+)
+
+type (
+ HTMLRender interface {
+ Instance(string, interface{}) Render
+ }
+
+ HTMLProduction struct {
+ Template *template.Template
+ }
+
+ HTMLDebug struct {
+ Files []string
+ Glob string
+ }
+
+ HTML struct {
+ Template *template.Template
+ Name string
+ Data interface{}
+ }
+)
+
+var htmlContentType = []string{"text/html; charset=utf-8"}
+
+func (r HTMLProduction) Instance(name string, data interface{}) Render {
+ return HTML{
+ Template: r.Template,
+ Name: name,
+ Data: data,
+ }
+}
+
+func (r HTMLDebug) Instance(name string, data interface{}) Render {
+ return HTML{
+ Template: r.loadTemplate(),
+ Name: name,
+ Data: data,
+ }
+}
+func (r HTMLDebug) loadTemplate() *template.Template {
+ if len(r.Files) > 0 {
+ return template.Must(template.ParseFiles(r.Files...))
+ }
+ if len(r.Glob) > 0 {
+ return template.Must(template.ParseGlob(r.Glob))
+ }
+ panic("the HTML debug render was created without files or glob pattern")
+}
+
+func (r HTML) Render(w http.ResponseWriter) error {
+ writeContentType(w, htmlContentType)
+ if len(r.Name) == 0 {
+ return r.Template.Execute(w, r.Data)
+ } else {
+ return r.Template.ExecuteTemplate(w, r.Name, r.Data)
+ }
+}
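
A sketch of the production path: the template set is parsed once and Instance mints a cheap per-request render (the template name and httptest recorder are illustrative):

package main

import (
	"html/template"
	"net/http/httptest"

	"github.com/gin-gonic/gin/render"
)

func main() {
	tmpl := template.Must(template.New("hello").Parse("Hello, {{.}}!"))
	r := render.HTMLProduction{Template: tmpl}

	w := httptest.NewRecorder()
	if err := r.Instance("hello", "world").Render(w); err != nil {
		panic(err)
	}
	// w.Body.String() == "Hello, world!", served as text/html.
}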
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/render/json.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/json.go
new file mode 100644
index 0000000..32e6058
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/json.go
@@ -0,0 +1,41 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+type (
+ JSON struct {
+ Data interface{}
+ }
+
+ IndentedJSON struct {
+ Data interface{}
+ }
+)
+
+var jsonContentType = []string{"application/json; charset=utf-8"}
+
+func (r JSON) Render(w http.ResponseWriter) error {
+ return WriteJSON(w, r.Data)
+}
+
+func (r IndentedJSON) Render(w http.ResponseWriter) error {
+ writeContentType(w, jsonContentType)
+ jsonBytes, err := json.MarshalIndent(r.Data, "", " ")
+ if err != nil {
+ return err
+ }
+ w.Write(jsonBytes)
+ return nil
+}
+
+func WriteJSON(w http.ResponseWriter, obj interface{}) error {
+ writeContentType(w, jsonContentType)
+ return json.NewEncoder(w).Encode(obj)
+}
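
The same Render pattern holds for Data, XML, and String below; a sketch exercising the JSON variant directly against an httptest recorder:

package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/gin-gonic/gin/render"
)

func main() {
	w := httptest.NewRecorder()
	j := render.JSON{Data: map[string]string{"status": "ok"}}
	if err := j.Render(w); err != nil {
		panic(err)
	}
	fmt.Print(w.Body.String()) // {"status":"ok"}
}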
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/render/redirect.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/redirect.go
new file mode 100644
index 0000000..bd48d7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/redirect.go
@@ -0,0 +1,24 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "fmt"
+ "net/http"
+)
+
+type Redirect struct {
+ Code int
+ Request *http.Request
+ Location string
+}
+
+func (r Redirect) Render(w http.ResponseWriter) error {
+ if (r.Code < 300 || r.Code > 308) && r.Code != 201 {
+ panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code))
+ }
+ http.Redirect(w, r.Request, r.Location, r.Code)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/render/render.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/render.go
new file mode 100644
index 0000000..994fcd7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/render.go
@@ -0,0 +1,30 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import "net/http"
+
+type Render interface {
+ Render(http.ResponseWriter) error
+}
+
+var (
+ _ Render = JSON{}
+ _ Render = IndentedJSON{}
+ _ Render = XML{}
+ _ Render = String{}
+ _ Render = Redirect{}
+ _ Render = Data{}
+ _ Render = HTML{}
+ _ HTMLRender = HTMLDebug{}
+ _ HTMLRender = HTMLProduction{}
+)
+
+func writeContentType(w http.ResponseWriter, value []string) {
+ header := w.Header()
+ if val := header["Content-Type"]; len(val) == 0 {
+ header["Content-Type"] = value
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/render/text.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/text.go
new file mode 100644
index 0000000..5a9e280
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/text.go
@@ -0,0 +1,33 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+)
+
+type String struct {
+ Format string
+ Data []interface{}
+}
+
+var plainContentType = []string{"text/plain; charset=utf-8"}
+
+func (r String) Render(w http.ResponseWriter) error {
+ WriteString(w, r.Format, r.Data)
+ return nil
+}
+
+func WriteString(w http.ResponseWriter, format string, data []interface{}) {
+ writeContentType(w, plainContentType)
+
+ if len(data) > 0 {
+ fmt.Fprintf(w, format, data...)
+ } else {
+ io.WriteString(w, format)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/render/xml.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/xml.go
new file mode 100644
index 0000000..be22e6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/render/xml.go
@@ -0,0 +1,21 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "encoding/xml"
+ "net/http"
+)
+
+type XML struct {
+ Data interface{}
+}
+
+var xmlContentType = []string{"application/xml; charset=utf-8"}
+
+func (r XML) Render(w http.ResponseWriter) error {
+ writeContentType(w, xmlContentType)
+ return xml.NewEncoder(w).Encode(r.Data)
+}
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/response_writer.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/response_writer.go
new file mode 100644
index 0000000..fcbe230
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/response_writer.go
@@ -0,0 +1,116 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ noWritten = -1
+ defaultStatus = 200
+)
+
+type (
+ ResponseWriter interface {
+ http.ResponseWriter
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+
+ // Returns the HTTP response status code of the current request.
+ Status() int
+
+ // Returns the number of bytes already written into the response http body.
+ // See Written()
+ Size() int
+
+ // Writes the string into the response body.
+ WriteString(string) (int, error)
+
+ // Returns true if the response body was already written.
+ Written() bool
+
+ // Forces the http header (status code + headers) to be written.
+ WriteHeaderNow()
+ }
+
+ responseWriter struct {
+ http.ResponseWriter
+ size int
+ status int
+ }
+)
+
+var _ ResponseWriter = &responseWriter{}
+
+func (w *responseWriter) reset(writer http.ResponseWriter) {
+ w.ResponseWriter = writer
+ w.size = noWritten
+ w.status = defaultStatus
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ if code > 0 && w.status != code {
+ if w.Written() {
+ debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code)
+ }
+ w.status = code
+ }
+}
+
+func (w *responseWriter) WriteHeaderNow() {
+ if !w.Written() {
+ w.size = 0
+ w.ResponseWriter.WriteHeader(w.status)
+ }
+}
+
+func (w *responseWriter) Write(data []byte) (n int, err error) {
+ w.WriteHeaderNow()
+ n, err = w.ResponseWriter.Write(data)
+ w.size += n
+ return
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ w.WriteHeaderNow()
+ n, err = io.WriteString(w.ResponseWriter, s)
+ w.size += n
+ return
+}
+
+func (w *responseWriter) Status() int {
+ return w.status
+}
+
+func (w *responseWriter) Size() int {
+ return w.size
+}
+
+func (w *responseWriter) Written() bool {
+ return w.size != noWritten
+}
+
+// Implements the http.Hijacker interface
+func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if w.size < 0 {
+ w.size = 0
+ }
+ return w.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+// Implements the http.CloseNotifier interface
+func (w *responseWriter) CloseNotify() <-chan bool {
+ return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Implements the http.Flusher interface
+func (w *responseWriter) Flush() {
+ w.ResponseWriter.(http.Flusher).Flush()
+}
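
Because c.Writer implements the ResponseWriter interface above, middleware can inspect what a handler wrote once c.Next() returns; a sketch (statusLogger is a hypothetical name):

package main

import (
	"log"

	"github.com/gin-gonic/gin"
)

// statusLogger reports the status code and body size of each response.
func statusLogger() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next() // run the rest of the chain first
		log.Printf("%d %dB %s", c.Writer.Status(), c.Writer.Size(), c.Request.URL.Path)
	}
}

func main() {
	r := gin.New()
	r.Use(statusLogger())
	r.GET("/", func(c *gin.Context) { c.String(200, "ok") })
	r.Run(":8080")
}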
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/routergroup.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/routergroup.go
new file mode 100644
index 0000000..f22729b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/routergroup.go
@@ -0,0 +1,215 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "net/http"
+ "path"
+ "regexp"
+ "strings"
+)
+
+type (
+ IRouter interface {
+ IRoutes
+ Group(string, ...HandlerFunc) *RouterGroup
+ }
+
+ IRoutes interface {
+ Use(...HandlerFunc) IRoutes
+
+ Handle(string, string, ...HandlerFunc) IRoutes
+ Any(string, ...HandlerFunc) IRoutes
+ GET(string, ...HandlerFunc) IRoutes
+ POST(string, ...HandlerFunc) IRoutes
+ DELETE(string, ...HandlerFunc) IRoutes
+ PATCH(string, ...HandlerFunc) IRoutes
+ PUT(string, ...HandlerFunc) IRoutes
+ OPTIONS(string, ...HandlerFunc) IRoutes
+ HEAD(string, ...HandlerFunc) IRoutes
+
+ StaticFile(string, string) IRoutes
+ Static(string, string) IRoutes
+ StaticFS(string, http.FileSystem) IRoutes
+ }
+
+ // RouterGroup is used internally to configure router, a RouterGroup is associated with a prefix
+ // and an array of handlers (middleware)
+ RouterGroup struct {
+ Handlers HandlersChain
+ basePath string
+ engine *Engine
+ root bool
+ }
+)
+
+var _ IRouter = &RouterGroup{}
+
+// Use adds middleware to the group; see example code on GitHub.
+func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes {
+ group.Handlers = append(group.Handlers, middleware...)
+ return group.returnObj()
+}
+
+// Group creates a new router group. You should add all the routes that share common middleware or the same path prefix.
+// For example, all the routes that use a common middleware for authorization could be grouped.
+func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup {
+ return &RouterGroup{
+ Handlers: group.combineHandlers(handlers),
+ basePath: group.calculateAbsolutePath(relativePath),
+ engine: group.engine,
+ }
+}
+
+func (group *RouterGroup) BasePath() string {
+ return group.basePath
+}
+
+func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes {
+ absolutePath := group.calculateAbsolutePath(relativePath)
+ handlers = group.combineHandlers(handlers)
+ group.engine.addRoute(httpMethod, absolutePath, handlers)
+ return group.returnObj()
+}
+
+// Handle registers a new request handle and middleware with the given path and method.
+// The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes.
+// See the example code in github.
+//
+// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
+// functions can be used.
+//
+// This function is intended for bulk loading and to allow the usage of less
+// frequently used, non-standardized or custom methods (e.g. for internal
+// communication with a proxy).
+func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes {
+ if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil {
+ panic("http method " + httpMethod + " is not valid")
+ }
+ return group.handle(httpMethod, relativePath, handlers)
+}
+
+// POST is a shortcut for router.Handle("POST", path, handle)
+func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("POST", relativePath, handlers)
+}
+
+// GET is a shortcut for router.Handle("GET", path, handle)
+func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("GET", relativePath, handlers)
+}
+
+// DELETE is a shortcut for router.Handle("DELETE", path, handle)
+func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("DELETE", relativePath, handlers)
+}
+
+// PATCH is a shortcut for router.Handle("PATCH", path, handle)
+func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("PATCH", relativePath, handlers)
+}
+
+// PUT is a shortcut for router.Handle("PUT", path, handle)
+func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("PUT", relativePath, handlers)
+}
+
+// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle)
+func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("OPTIONS", relativePath, handlers)
+}
+
+// HEAD is a shortcut for router.Handle("HEAD", path, handle)
+func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle("HEAD", relativePath, handlers)
+}
+
+// Any registers a route that matches all the HTTP methods.
+// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE
+func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes {
+ group.handle("GET", relativePath, handlers)
+ group.handle("POST", relativePath, handlers)
+ group.handle("PUT", relativePath, handlers)
+ group.handle("PATCH", relativePath, handlers)
+ group.handle("HEAD", relativePath, handlers)
+ group.handle("OPTIONS", relativePath, handlers)
+ group.handle("DELETE", relativePath, handlers)
+ group.handle("CONNECT", relativePath, handlers)
+ group.handle("TRACE", relativePath, handlers)
+ return group.returnObj()
+}
+
+// StaticFile registers a single route in order to serve a single file of the local filesystem.
+// router.StaticFile("favicon.ico", "./resources/favicon.ico")
+func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes {
+ if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
+ panic("URL parameters can not be used when serving a static file")
+ }
+ handler := func(c *Context) {
+ c.File(filepath)
+ }
+ group.GET(relativePath, handler)
+ group.HEAD(relativePath, handler)
+ return group.returnObj()
+}
+
+// Static serves files from the given file system root.
+// Internally an http.FileServer is used, therefore http.NotFound is used instead
+// of the Router's NotFound handler.
+// To use the operating system's file system implementation, use:
+// router.Static("/static", "/var/www")
+func (group *RouterGroup) Static(relativePath, root string) IRoutes {
+ return group.StaticFS(relativePath, Dir(root, false))
+}
+
+// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead.
+// Gin by default uses: gin.Dir()
+func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes {
+ if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
+ panic("URL parameters can not be used when serving a static folder")
+ }
+ handler := group.createStaticHandler(relativePath, fs)
+ urlPattern := path.Join(relativePath, "/*filepath")
+
+ // Register GET and HEAD handlers
+ group.GET(urlPattern, handler)
+ group.HEAD(urlPattern, handler)
+ return group.returnObj()
+}
+
+func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc {
+ absolutePath := group.calculateAbsolutePath(relativePath)
+ fileServer := http.StripPrefix(absolutePath, http.FileServer(fs))
+ _, nolisting := fs.(*onlyfilesFS)
+ return func(c *Context) {
+ if nolisting {
+ c.Writer.WriteHeader(404)
+ }
+ fileServer.ServeHTTP(c.Writer, c.Request)
+ }
+}
+
+func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain {
+ finalSize := len(group.Handlers) + len(handlers)
+ if finalSize >= int(abortIndex) {
+ panic("too many handlers")
+ }
+ mergedHandlers := make(HandlersChain, finalSize)
+ copy(mergedHandlers, group.Handlers)
+ copy(mergedHandlers[len(group.Handlers):], handlers)
+ return mergedHandlers
+}
+
+func (group *RouterGroup) calculateAbsolutePath(relativePath string) string {
+ return joinPaths(group.basePath, relativePath)
+}
+
+func (group *RouterGroup) returnObj() IRoutes {
+ if group.root {
+ return group.engine
+ }
+ return group
+}
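
A sketch of the grouping API in use: a shared /v1 prefix with routes attached, plus the static helpers (paths and handlers are illustrative):

package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.Default()

	v1 := r.Group("/v1")
	{
		v1.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
		v1.POST("/items", func(c *gin.Context) { c.JSON(201, gin.H{"created": true}) })
	}

	r.StaticFile("/favicon.ico", "./resources/favicon.ico")
	r.Static("/assets", "./assets")

	r.Run(":8080")
}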
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/test_helpers.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/test_helpers.go
new file mode 100644
index 0000000..7d8020c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/test_helpers.go
@@ -0,0 +1,14 @@
+package gin
+
+import (
+ "net/http/httptest"
+)
+
+func CreateTestContext() (c *Context, w *httptest.ResponseRecorder, r *Engine) {
+ w = httptest.NewRecorder()
+ r = New()
+ c = r.allocateContext()
+ c.reset()
+ c.writermem.reset(w)
+ return
+}
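
A sketch of the helper in a unit test: the recorder captures whatever the context writes; note that at this revision CreateTestContext takes no arguments (the test name is hypothetical):

package gin

import "testing"

func TestContextSketch(t *testing.T) {
	c, w, _ := CreateTestContext()
	c.String(200, "hello")
	if w.Code != 200 || w.Body.String() != "hello" {
		t.Errorf("got %d %q", w.Code, w.Body.String())
	}
}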
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/tree.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/tree.go
new file mode 100644
index 0000000..4f2082e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/tree.go
@@ -0,0 +1,603 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package gin
+
+import (
+ "strings"
+ "unicode"
+)
+
+// Param is a single URL parameter, consisting of a key and a value.
+type Param struct {
+ Key string
+ Value string
+}
+
+// Params is a Param-slice, as returned by the router.
+// The slice is ordered, the first URL parameter is also the first slice value.
+// It is therefore safe to read values by the index.
+type Params []Param
+
+// Get returns the value of the first Param whose key matches the given name,
+// plus a bool reporting whether a match was found.
+// If no matching Param is found, an empty string is returned.
+func (ps Params) Get(name string) (string, bool) {
+ for _, entry := range ps {
+ if entry.Key == name {
+ return entry.Value, true
+ }
+ }
+ return "", false
+}
+
+func (ps Params) ByName(name string) (va string) {
+ va, _ = ps.Get(name)
+ return
+}
+
+type methodTree struct {
+ method string
+ root *node
+}
+
+type methodTrees []methodTree
+
+func (trees methodTrees) get(method string) *node {
+ for _, tree := range trees {
+ if tree.method == method {
+ return tree.root
+ }
+ }
+ return nil
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+func countParams(path string) uint8 {
+ var n uint
+ for i := 0; i < len(path); i++ {
+ if path[i] != ':' && path[i] != '*' {
+ continue
+ }
+ n++
+ }
+ if n >= 255 {
+ return 255
+ }
+ return uint8(n)
+}
+
+type nodeType uint8
+
+const (
+ static nodeType = iota // default
+ root
+ param
+ catchAll
+)
+
+type node struct {
+ path string
+ wildChild bool
+ nType nodeType
+ maxParams uint8
+ indices string
+ children []*node
+ handlers HandlersChain
+ priority uint32
+}
+
+// increments priority of the given child and reorders if necessary
+func (n *node) incrementChildPrio(pos int) int {
+ n.children[pos].priority++
+ prio := n.children[pos].priority
+
+ // adjust position (move to front)
+ newPos := pos
+ for newPos > 0 && n.children[newPos-1].priority < prio {
+ // swap node positions
+ tmpN := n.children[newPos-1]
+ n.children[newPos-1] = n.children[newPos]
+ n.children[newPos] = tmpN
+
+ newPos--
+ }
+
+ // build new index char string
+ if newPos != pos {
+ n.indices = n.indices[:newPos] + // unchanged prefix, might be empty
+ n.indices[pos:pos+1] + // the index char we move
+ n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos'
+ }
+
+ return newPos
+}
+
+// addRoute adds a node with the given handle to the path.
+// Not concurrency-safe!
+func (n *node) addRoute(path string, handlers HandlersChain) {
+ fullPath := path
+ n.priority++
+ numParams := countParams(path)
+
+ // non-empty tree
+ if len(n.path) > 0 || len(n.children) > 0 {
+ walk:
+ for {
+ // Update maxParams of the current node
+ if numParams > n.maxParams {
+ n.maxParams = numParams
+ }
+
+ // Find the longest common prefix.
+ // This also implies that the common prefix contains no ':' or '*'
+ // since the existing key can't contain those chars.
+ i := 0
+ max := min(len(path), len(n.path))
+ for i < max && path[i] == n.path[i] {
+ i++
+ }
+
+ // Split edge
+ if i < len(n.path) {
+ child := node{
+ path: n.path[i:],
+ wildChild: n.wildChild,
+ indices: n.indices,
+ children: n.children,
+ handlers: n.handlers,
+ priority: n.priority - 1,
+ }
+
+ // Update maxParams (max of all children)
+ for i := range child.children {
+ if child.children[i].maxParams > child.maxParams {
+ child.maxParams = child.children[i].maxParams
+ }
+ }
+
+ n.children = []*node{&child}
+ // []byte for proper unicode char conversion, see #65
+ n.indices = string([]byte{n.path[i]})
+ n.path = path[:i]
+ n.handlers = nil
+ n.wildChild = false
+ }
+
+ // Make new node a child of this node
+ if i < len(path) {
+ path = path[i:]
+
+ if n.wildChild {
+ n = n.children[0]
+ n.priority++
+
+ // Update maxParams of the child node
+ if numParams > n.maxParams {
+ n.maxParams = numParams
+ }
+ numParams--
+
+ // Check if the wildcard matches
+ if len(path) >= len(n.path) && n.path == path[:len(n.path)] {
+ // check for longer wildcard, e.g. :name and :names
+ if len(n.path) >= len(path) || path[len(n.path)] == '/' {
+ continue walk
+ }
+ }
+
+ panic("path segment '" + path +
+ "' conflicts with existing wildcard '" + n.path +
+ "' in path '" + fullPath + "'")
+ }
+
+ c := path[0]
+
+ // slash after param
+ if n.nType == param && c == '/' && len(n.children) == 1 {
+ n = n.children[0]
+ n.priority++
+ continue walk
+ }
+
+ // Check if a child with the next path byte exists
+ for i := 0; i < len(n.indices); i++ {
+ if c == n.indices[i] {
+ i = n.incrementChildPrio(i)
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // Otherwise insert it
+ if c != ':' && c != '*' {
+ // []byte for proper unicode char conversion, see #65
+ n.indices += string([]byte{c})
+ child := &node{
+ maxParams: numParams,
+ }
+ n.children = append(n.children, child)
+ n.incrementChildPrio(len(n.indices) - 1)
+ n = child
+ }
+ n.insertChild(numParams, path, fullPath, handlers)
+ return
+
+ } else if i == len(path) { // Make node a (in-path) leaf
+ if n.handlers != nil {
+ panic("handlers are already registered for path ''" + fullPath + "'")
+ }
+ n.handlers = handlers
+ }
+ return
+ }
+ } else { // Empty tree
+ n.insertChild(numParams, path, fullPath, handlers)
+ n.nType = root
+ }
+}
+
+func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers HandlersChain) {
+ var offset int // already handled bytes of the path
+
+ // find prefix until first wildcard (beginning with ':' or '*')
+ for i, max := 0, len(path); numParams > 0; i++ {
+ c := path[i]
+ if c != ':' && c != '*' {
+ continue
+ }
+
+ // find wildcard end (either '/' or path end)
+ end := i + 1
+ for end < max && path[end] != '/' {
+ switch path[end] {
+ // the wildcard name must not contain ':' and '*'
+ case ':', '*':
+ panic("only one wildcard per path segment is allowed, has: '" +
+ path[i:] + "' in path '" + fullPath + "'")
+ default:
+ end++
+ }
+ }
+
+ // check if this node has existing children which would be
+ // unreachable if we insert the wildcard here
+ if len(n.children) > 0 {
+ panic("wildcard route '" + path[i:end] +
+ "' conflicts with existing children in path '" + fullPath + "'")
+ }
+
+ // check if the wildcard has a name
+ if end-i < 2 {
+ panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
+ }
+
+ if c == ':' { // param
+ // split path at the beginning of the wildcard
+ if i > 0 {
+ n.path = path[offset:i]
+ offset = i
+ }
+
+ child := &node{
+ nType: param,
+ maxParams: numParams,
+ }
+ n.children = []*node{child}
+ n.wildChild = true
+ n = child
+ n.priority++
+ numParams--
+
+ // if the path doesn't end with the wildcard, then there
+ // will be another non-wildcard subpath starting with '/'
+ if end < max {
+ n.path = path[offset:end]
+ offset = end
+
+ child := &node{
+ maxParams: numParams,
+ priority: 1,
+ }
+ n.children = []*node{child}
+ n = child
+ }
+
+ } else { // catchAll
+ if end != max || numParams > 1 {
+ panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
+ }
+
+ if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
+ panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
+ }
+
+ // currently fixed width 1 for '/'
+ i--
+ if path[i] != '/' {
+ panic("no / before catch-all in path '" + fullPath + "'")
+ }
+
+ n.path = path[offset:i]
+
+ // first node: catchAll node with empty path
+ child := &node{
+ wildChild: true,
+ nType: catchAll,
+ maxParams: 1,
+ }
+ n.children = []*node{child}
+ n.indices = string(path[i])
+ n = child
+ n.priority++
+
+ // second node: node holding the variable
+ child = &node{
+ path: path[i:],
+ nType: catchAll,
+ maxParams: 1,
+ handlers: handlers,
+ priority: 1,
+ }
+ n.children = []*node{child}
+
+ return
+ }
+ }
+
+ // insert remaining path part and handle to the leaf
+ n.path = path[offset:]
+ n.handlers = handlers
+}
+
+// getValue returns the handle registered with the given path (key). The values
+// of wildcards are saved to the Params slice.
+// If no handle can be found, a TSR (trailing slash redirect) recommendation is
+// made if a handle exists with an extra trailing slash (or without the
+// trailing slash) for the given path.
+func (n *node) getValue(path string, po Params) (handlers HandlersChain, p Params, tsr bool) {
+ p = po
+walk: // Outer loop for walking the tree
+ for {
+ if len(path) > len(n.path) {
+ if path[:len(n.path)] == n.path {
+ path = path[len(n.path):]
+ // If this node does not have a wildcard (param or catchAll)
+ // child, we can just look up the next child node and continue
+ // to walk down the tree
+ if !n.wildChild {
+ c := path[0]
+ for i := 0; i < len(n.indices); i++ {
+ if c == n.indices[i] {
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // Nothing found.
+ // We can recommend to redirect to the same URL without a
+ // trailing slash if a leaf exists for that path.
+ tsr = (path == "/" && n.handlers != nil)
+ return
+ }
+
+ // handle wildcard child
+ n = n.children[0]
+ switch n.nType {
+ case param:
+ // find param end (either '/' or path end)
+ end := 0
+ for end < len(path) && path[end] != '/' {
+ end++
+ }
+
+ // save param value
+ if cap(p) < int(n.maxParams) {
+ p = make(Params, 0, n.maxParams)
+ }
+ i := len(p)
+ p = p[:i+1] // expand slice within preallocated capacity
+ p[i].Key = n.path[1:]
+ p[i].Value = path[:end]
+
+ // we need to go deeper!
+ if end < len(path) {
+ if len(n.children) > 0 {
+ path = path[end:]
+ n = n.children[0]
+ continue walk
+ }
+
+ // ... but we can't
+ tsr = (len(path) == end+1)
+ return
+ }
+
+ if handlers = n.handlers; handlers != nil {
+ return
+ } else if len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for TSR recommendation
+ n = n.children[0]
+ tsr = (n.path == "/" && n.handlers != nil)
+ }
+
+ return
+
+ case catchAll:
+ // save param value
+ if cap(p) < int(n.maxParams) {
+ p = make(Params, 0, n.maxParams)
+ }
+ i := len(p)
+ p = p[:i+1] // expand slice within preallocated capacity
+ p[i].Key = n.path[2:]
+ p[i].Value = path
+
+ handlers = n.handlers
+ return
+
+ default:
+ panic("invalid node type")
+ }
+ }
+ } else if path == n.path {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if handlers = n.handlers; handlers != nil {
+ return
+ }
+
+ if path == "/" && n.wildChild && n.nType != root {
+ tsr = true
+ return
+ }
+
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for trailing slash recommendation
+ for i := 0; i < len(n.indices); i++ {
+ if n.indices[i] == '/' {
+ n = n.children[i]
+ tsr = (len(n.path) == 1 && n.handlers != nil) ||
+ (n.nType == catchAll && n.children[0].handlers != nil)
+ return
+ }
+ }
+
+ return
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL with an
+ // extra trailing slash if a leaf exists for that path
+ tsr = (path == "/") ||
+ (len(n.path) == len(path)+1 && n.path[len(path)] == '/' &&
+ path == n.path[:len(n.path)-1] && n.handlers != nil)
+ return
+ }
+}
+
+// Makes a case-insensitive lookup of the given path and tries to find a handler.
+// It can optionally also fix trailing slashes.
+// It returns the case-corrected path and a bool indicating whether the lookup
+// was successful.
+func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) {
+ ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory
+
+ // Outer loop for walking the tree
+ for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) {
+ path = path[len(n.path):]
+ ciPath = append(ciPath, n.path...)
+
+ if len(path) > 0 {
+ // If this node does not have a wildcard (param or catchAll) child,
+ // we can just look up the next child node and continue to walk down
+ // the tree
+ if !n.wildChild {
+ r := unicode.ToLower(rune(path[0]))
+ for i, index := range n.indices {
+ // must use recursive approach since both index and
+ // ToLower(index) could exist. We must check both.
+ if r == unicode.ToLower(index) {
+ out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash)
+ if found {
+ return append(ciPath, out...), true
+ }
+ }
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL
+ // without a trailing slash if a leaf exists for that path
+ found = (fixTrailingSlash && path == "/" && n.handlers != nil)
+ return
+ }
+
+ n = n.children[0]
+ switch n.nType {
+ case param:
+ // find param end (either '/' or path end)
+ k := 0
+ for k < len(path) && path[k] != '/' {
+ k++
+ }
+
+ // add param value to case insensitive path
+ ciPath = append(ciPath, path[:k]...)
+
+ // we need to go deeper!
+ if k < len(path) {
+ if len(n.children) > 0 {
+ path = path[k:]
+ n = n.children[0]
+ continue
+ }
+
+ // ... but we can't
+ if fixTrailingSlash && len(path) == k+1 {
+ return ciPath, true
+ }
+ return
+ }
+
+ if n.handlers != nil {
+ return ciPath, true
+ } else if fixTrailingSlash && len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists
+ n = n.children[0]
+ if n.path == "/" && n.handlers != nil {
+ return append(ciPath, '/'), true
+ }
+ }
+ return
+
+ case catchAll:
+ return append(ciPath, path...), true
+
+ default:
+ panic("invalid node type")
+ }
+ } else {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if n.handlers != nil {
+ return ciPath, true
+ }
+
+ // No handle found.
+ // Try to fix the path by adding a trailing slash
+ if fixTrailingSlash {
+ for i := 0; i < len(n.indices); i++ {
+ if n.indices[i] == '/' {
+ n = n.children[i]
+ if (len(n.path) == 1 && n.handlers != nil) ||
+ (n.nType == catchAll && n.children[0].handlers != nil) {
+ return append(ciPath, '/'), true
+ }
+ return
+ }
+ }
+ }
+ return
+ }
+ }
+
+ // Nothing found.
+ // Try to fix the path by adding / removing a trailing slash
+ if fixTrailingSlash {
+ if path == "/" {
+ return ciPath, true
+ }
+ if len(path)+1 == len(n.path) && n.path[len(path)] == '/' &&
+ strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) &&
+ n.handlers != nil {
+ return append(ciPath, n.path...), true
+ }
+ }
+ return
+}
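
From the application side, the param and catchAll nodes above surface as route parameters read through Params.ByName; a sketch:

package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.Default()
	// ":name" becomes a param node; "*action" would become a catchAll node.
	r.GET("/user/:name", func(c *gin.Context) {
		c.String(200, "Hello %s", c.Params.ByName("name"))
	})
	r.Run(":8080")
}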
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/utils.go b/Godeps/_workspace/src/github.com/gin-gonic/gin/utils.go
new file mode 100644
index 0000000..2814791
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/utils.go
@@ -0,0 +1,155 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "encoding/xml"
+ "net/http"
+ "os"
+ "path"
+ "reflect"
+ "runtime"
+ "strings"
+)
+
+const BindKey = "_gin-gonic/gin/bindkey"
+
+func Bind(val interface{}) HandlerFunc {
+ value := reflect.ValueOf(val)
+ if value.Kind() == reflect.Ptr {
+ panic(`Bind struct can not be a pointer. Example:
+ Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{})
+`)
+ }
+ typ := value.Type()
+
+ return func(c *Context) {
+ obj := reflect.New(typ).Interface()
+ if c.Bind(obj) == nil {
+ c.Set(BindKey, obj)
+ }
+ }
+}
+
+func WrapF(f http.HandlerFunc) HandlerFunc {
+ return func(c *Context) {
+ f(c.Writer, c.Request)
+ }
+}
+
+func WrapH(h http.Handler) HandlerFunc {
+ return func(c *Context) {
+ h.ServeHTTP(c.Writer, c.Request)
+ }
+}
+
+type H map[string]interface{}
+
+// MarshalXML allows type H to be used with xml.Marshal.
+func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ start.Name = xml.Name{
+ Space: "",
+ Local: "map",
+ }
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+ for key, value := range h {
+ elem := xml.StartElement{
+ Name: xml.Name{Space: "", Local: key},
+ Attr: []xml.Attr{},
+ }
+ if err := e.EncodeElement(value, elem); err != nil {
+ return err
+ }
+ }
+ if err := e.EncodeToken(xml.EndElement{Name: start.Name}); err != nil {
+ return err
+ }
+ return nil
+}
+
+func assert1(guard bool, text string) {
+ if !guard {
+ panic(text)
+ }
+}
+
+func filterFlags(content string) string {
+ for i, char := range content {
+ if char == ' ' || char == ';' {
+ return content[:i]
+ }
+ }
+ return content
+}
+
+func chooseData(custom, wildcard interface{}) interface{} {
+ if custom == nil {
+ if wildcard == nil {
+ panic("negotiation config is invalid")
+ }
+ return wildcard
+ }
+ return custom
+}
+
+func parseAccept(acceptHeader string) []string {
+ parts := strings.Split(acceptHeader, ",")
+ out := make([]string, 0, len(parts))
+ for _, part := range parts {
+ index := strings.IndexByte(part, ';')
+ if index >= 0 {
+ part = part[0:index]
+ }
+ part = strings.TrimSpace(part)
+ if len(part) > 0 {
+ out = append(out, part)
+ }
+ }
+ return out
+}
+
+func lastChar(str string) uint8 {
+ size := len(str)
+ if size == 0 {
+ panic("The length of the string can't be 0")
+ }
+ return str[size-1]
+}
+
+func nameOfFunction(f interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
+}
+
+func joinPaths(absolutePath, relativePath string) string {
+ if len(relativePath) == 0 {
+ return absolutePath
+ }
+
+ finalPath := path.Join(absolutePath, relativePath)
+ appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/'
+ if appendSlash {
+ return finalPath + "/"
+ }
+ return finalPath
+}
+
+func resolveAddress(addr []string) string {
+ switch len(addr) {
+ case 0:
+ if port := os.Getenv("PORT"); len(port) > 0 {
+ debugPrint("Environment variable PORT=\"%s\"", port)
+ return ":" + port
+ } else {
+ debugPrint("Environment variable PORT is undefined. Using port :8080 by default")
+ return ":8080"
+ }
+ case 1:
+ return addr[0]
+ default:
+ panic("too much parameters")
+ }
+}
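
A sketch of WrapF adapting a plain net/http handler into the chain (the handler name is illustrative); WrapH works the same way for an http.Handler value:

package main

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
)

func legacy(w http.ResponseWriter, req *http.Request) {
	fmt.Fprintln(w, "served by a plain net/http handler")
}

func main() {
	r := gin.Default()
	r.GET("/legacy", gin.WrapF(legacy))
	r.Run(":8080")
}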
diff --git a/Godeps/_workspace/src/github.com/gin-gonic/gin/wercker.yml b/Godeps/_workspace/src/github.com/gin-gonic/gin/wercker.yml
new file mode 100644
index 0000000..3ab8084
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gin-gonic/gin/wercker.yml
@@ -0,0 +1 @@
+box: wercker/default
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore
new file mode 100644
index 0000000..7adca94
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore
@@ -0,0 +1,4 @@
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README.md b/Godeps/_workspace/src/github.com/go-ini/ini/README.md
new file mode 100644
index 0000000..7d50639
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/README.md
@@ -0,0 +1,624 @@
+ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Feature
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+ go get gopkg.in/ini.v1
+
+To use with latest changes:
+
+ go get github.com/go-ini/ini
+
+Please add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to test on your machine, please apply the `-t` flag:
+
+ go get -t gopkg.in/ini.v1
+
+Please add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many** data sources as you want. Passing other types will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section somehow does not exist? Don't panic; it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone hash of keys and corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive read) for the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get value with types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must accept an optional argument as the default value,
+// which is returned when the key is not found or the value fails to parse as the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that single quotes around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods for working with values
+
+To get a value from a set of given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value is returned if the key's value is not among the candidates you give; the default value does not need to be one of the candidates.
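+
+For example (a sketch; the key and values are made up), if the file contains `STRING = unknown`:
+
+```go
+// "unknown" is not among the candidates, so the default is returned.
+// Note that the default "fallback" is not itself one of the candidates.
+v = cfg.Section("").Key("STRING").In("fallback", []string{"str", "arr", "types"})
+// v == "fallback"
+```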
+
+To validate value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use the type's zero value for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there is any invalid input:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is writing to anything that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values there is a special syntax `%(name)s`, where `name` is a key name in the same section or the default section; `%(name)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try its parent section, and so on until there is no parent left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as the special syntax for auto-increment key names, which start from 1; every section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+ // Things can be simpler.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+ // Just map a section? Fine.
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map the struct. The value is kept as it is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if I can't get my file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string
+ None []int
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are two built-in name mappers:
+
+- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching section or key names.
+- `TitleUnderscore`: converts field names to the `title_underscore` format before matching section or key names.
+
+To use them:
+
+```go
+type Info struct {
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
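+
+As a sketch (assuming, by analogy with `ini.MapToWithMapper` above, that `ReflectFromWithMapper` takes the destination `*ini.File`, the struct, and then the mapper):
+
+```go
+cfg := ini.Empty()
+// The field PackageName would be written out as the key "package_name".
+err = ini.ReflectFromWithMapper(cfg, &Info{PackageName: "ini"}, ini.TitleUnderscore)
+// ...
+```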
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library lets you both read and write values, so it needs a lock to make sure your data is safe. But if you are certain that you only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
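+
+For example, a minimal read-only setup (the file, section, and key names here are only illustrative):
+
+```go
+cfg, err := ini.Load("app.ini")
+if err != nil {
+	// handle error
+}
+// Only reads happen from here on, so drop the locking overhead.
+cfg.BlockMode = false
+timeout := cfg.Section("server").Key("TIMEOUT").MustInt(30)
+```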
+
+### Why another INI library?
+
+Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+Making those changes meant breaking the API, so it was safer to start over in a new place and use `gopkg.in` to version the package this time. (PS: the import path is shorter, too.)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md
new file mode 100644
index 0000000..49bc610
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md
@@ -0,0 +1,611 @@
+This package provides INI file read and write functionality for the Go language.
+
+## Features
+
+- Load from multiple data sources (`[]byte` or files), with overwriting
+- Read recursive values
+- Read parent-child sections
+- Read auto-increment key names
+- Read multi-line values
+- A large set of helper methods
+- Convert values directly to Go types while reading
+- Read and **write** comments of sections and keys
+- Easily manipulate sections, keys, and comments
+- Sections and keys keep their original order when the file is saved
+
+## Installation
+
+To use a tagged revision:
+
+ go get gopkg.in/ini.v1
+
+To use the latest version:
+
+ go get github.com/go-ini/ini
+
+Add the `-u` flag to update the package in the future.
+
+### Testing
+
+If you want to run tests on your machine, use the `-t` flag:
+
+ go get -t gopkg.in/ini.v1
+
+Add the `-u` flag to update the package in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **data source** is either raw data in type `[]byte` or a file path in type `string`, and you can load **as many** data sources **as** you want. Passing data sources of any other type will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start from a blank file:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide at the beginning which data sources to load, you can still use **Append()** to load them when needed.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections
+
+To get a section:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+To get the default section, use an empty string in place of the section name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you are certain a section exists, you can use the following shortcut:
+
+```go
+section := cfg.Section("")
+```
+
+What if you guessed wrong and the section you asked for does not actually exist? No worries; it automatically creates and returns the corresponding section object to you.
+
+To create a new section:
+
+```go
+sec, err := cfg.NewSection("new section")
+```
+
+To get all section objects or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+As with sections, you can also fetch a key directly and skip the error handling:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check whether a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+key, err := cfg.Section("").NewKey("name", "value")
+```
+
+To get all keys or key names under a section:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone of all key-value pairs under a section:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a value of type `string`:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To process and validate a value on the fly with a custom function:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+If you do not need any automatic transformation of the value (such as recursive lookup), you can get the raw value directly (this gives the best performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check whether a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values of other types:
+
+```go
+// Rules for boolean values:
+// true when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must accept an optional argument of the same type as the default value,
+// which is returned directly when the key does not exist or the conversion fails.
+// However, the MustString method always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value spans several lines?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Hmm? Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+Awesome! But what if a value is too long for one line and I want to continue it on the next?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that single quotes around a value are stripped automatically:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+Is that all? Haha, of course not.
+
+#### Helper methods for working with values
+
+To get a value from a set of given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+If the retrieved value is not one of the candidates, the default value is returned; the default does not have to be one of the candidates.
+
+To validate that a value is within a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To substitute the zero value for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid inputs from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+To return an error as soon as there is any invalid input:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, the moment has arrived; it's time to save your configuration.
+
+The most basic approach is to write the configuration to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+A more advanced approach is to write it to anything that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+While reading key values, the special syntax `%(name)s` is expanded, where `name` can be a key name in the same section or the default section. The string `%(name)s` is replaced by the corresponding key value, or by an empty string if the key does not exist. You can nest this recursion up to 99 levels deep.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key does not exist in the child section, the library looks for it in the parent section, and so on until there is no parent left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+### Auto-increment Key Names
+
+If a key name in the data source is `-`, it is treated as the special syntax for auto-increment key names. The counter starts from 1, and every section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want to play with INI in a more object-oriented way? Good idea.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+	// Things can be this simple.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+	// Hmm? Just want to map one section? Fine.
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+How do you set a default value for a struct field? Easy: just assign the field before mapping. If the key is not found or has the wrong type, the value stays unchanged.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
+
+### Reflect From Struct
+
+Did I ever say you can't?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string
+ None []int
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+See? A miracle happened.
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save your time and simplify your code, this library supports a name mapper of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which handles the mapping between struct field names and actual section and key names.
+
+There are currently two built-in name mappers:
+
+- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching section and key names.
+- `TitleUnderscore`: converts field names to the `title_underscore` format before matching section and key names.
+
+Usage:
+
+```go
+type Info struct{
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same rules apply when using the `ini.ReflectFromWithMapper` function.
+
+#### Other Notes on Map/Reflect
+
+Any embedded struct is treated as a separate section by default, and no parent-child section relations are created automatically in the map/reflect feature:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+Fine, but I'm paranoid and insist that the embedded struct live in the same section. Well, all roads lead to Rome.
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File an Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library uses a locking mechanism for your read and write operations to keep your data safe. But in some cases you are certain that you only read data. In that case, you can set `cfg.BlockMode = false` to speed up read operations by roughly **50-70%**.
+
+### Why another INI library?
+
+Many people use my other library [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted to write code in a more Go-like style. Also, when you set `cfg.BlockMode = false`, there is roughly a **10-30%** performance gain.
+
+Making those changes meant breaking the API, so starting a new repository was the safest approach. Besides, this library uses `gopkg.in` for versioned releases this time. (The real truth is that the import path is shorter.)
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/ini.go b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go
new file mode 100644
index 0000000..ca9f634
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go
@@ -0,0 +1,1183 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ DEFAULT_SECTION = "DEFAULT"
+	// Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99
+
+ _VERSION = "1.9.0"
+)
+
+func Version() string {
+ return _VERSION
+}
+
+var (
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+ // Write spaces around "=" to look better.
+ PrettyFormat = true
+)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
+
+// dataSource is an interface that returns file content.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+ reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+ return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+ return nil
+}
+
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return &bytesReadCloser{bytes.NewReader(s.data)}, nil
+}
+
+// ____ __.
+// | |/ _|____ ___.__.
+// | <_/ __ < | |
+// | | \ ___/\___ |
+// |____|__ \___ > ____|
+// \/ \/\/
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ Comment string
+ name string
+ value string
+ isAutoIncr bool
+}
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns raw value of key for performance purposes.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ val := k.value
+ if strings.Index(val, "%") == -1 {
+ return val
+ }
+
+ for i := 0; i < _DEPTH_VALUES; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := strings.TrimLeft(vr, "%(")
+ noption = strings.TrimRight(noption, ")s")
+
+ // Search in the same section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil {
+ // Search again in default section.
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ }
+
+		// Substitute with the new value.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// Validate accepts a validation function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 10, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ vals := strings.Split(str, delim)
+ for i := range vals {
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+ return vals
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.getFloat64s(delim, true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.getInts(delim, true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.getInt64s(delim, true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.getUints(delim, true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.getUint64s(delim, true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.getTimesFormat(format, delim, true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.getFloat64s(delim, false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.getInts(delim, false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.getInt64s(delim, false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.getUints(delim, false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.getUint64s(delim, false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.getTimesFormat(format, delim, false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.getFloat64s(delim, false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.getInts(delim, false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.getInt64s(delim, false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.getUints(delim, false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.getUint64s(delim, false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.getTimesFormat(format, delim, false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// getFloat64s returns list of float64 divided by given delimiter.
+func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ strs := k.Strings(delim)
+ vals := make([]float64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getInts returns list of int divided by given delimiter.
+func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ strs := k.Strings(delim)
+ vals := make([]int, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.Atoi(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getInt64s returns list of int64 divided by given delimiter.
+func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ strs := k.Strings(delim)
+ vals := make([]int64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getUints returns list of uint divided by given delimiter.
+func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ strs := k.Strings(delim)
+ vals := make([]uint, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 0)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, uint(val))
+ }
+ }
+ return vals, nil
+}
+
+// getUint64s returns list of uint64 divided by given delimiter.
+func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ strs := k.Strings(delim)
+ vals := make([]uint64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ strs := k.Strings(delim)
+ vals := make([]time.Time, 0, len(strs))
+ for _, str := range strs {
+ val, err := time.Parse(format, str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
+
+// _________ __ .__
+// / _____/ ____ _____/ |_|__| ____ ____
+// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \
+// / \ ___/\ \___| | | ( <_> ) | \
+// /_______ /\___ >\___ >__| |__|\____/|___| /
+// \/ \/ \/ \/
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// NewKey creates a new key in the given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ s.keys[name].value = val
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = &Key{s, "", name, val, false}
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ // FIXME: change to section level lock?
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
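+		// Walk up the dotted section name, e.g. "a.b.c" -> "a.b" -> "a", checking each parent.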
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ } else {
+ break
+ }
+ }
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := map[string]string{}
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ return
+ }
+ }
+}
+
+// ___________.__.__
+// \_ _____/|__| | ____
+// | __) | | | _/ __ \
+// | \ | | |_\ ___/
+// \___ / |__|____/\___ >
+// \/ \/
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ // Make sure data is safe in multiple goroutines.
+ lock sync.RWMutex
+
+ // Allow combination of multiple data sources.
+ dataSources []dataSource
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ // To keep data in order.
+ sectionList []string
+
+ NameMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource) *File {
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ }
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+ }
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data ([]byte).
+func Load(source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here because we're sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+		return nil, fmt.Errorf("error when getting section: section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns a list of Section.
+func (f *File) Sections() []*Section {
+ sections := make([]*Section, len(f.sectionList))
+ for i := range f.sectionList {
+ sections[i] = f.Section(f.sectionList[i])
+ }
+ return sections
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+// WriteToIndent writes the file content into an io.Writer with the given value indentation.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+ equalSign := "="
+ if PrettyFormat {
+ equalSign = " = "
+ }
+
+ // Use buffer to make sure target is safe until finish encoding.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+ sec.Comment = "; " + sec.Comment
+ }
+ if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if i > 0 {
+ if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return 0, err
+ }
+ } else {
+ // Write nothing if default section is empty.
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+ if key.Comment[0] != '#' && key.Comment[0] != ';' {
+ key.Comment = "; " + key.Comment
+ }
+ if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
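+			// Quote key names containing delimiters or quotes so they can be parsed back.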
+ switch {
+ case key.isAutoIncr:
+ kname = "-"
+ case strings.ContainsAny(kname, "\"=:"):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ val := key.value
+ // In case key value contains "\n", "`", "\"", "#" or ";".
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ // Put a line between sections.
+ if _, err = buf.WriteString(LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes the content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target file,
+	// it's safer to save to a temporary location and rename after we're done.
+ tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+ defer os.Remove(tmpPath)
+
+ fw, err := os.Create(tmpPath)
+ if err != nil {
+ return err
+ }
+
+ if _, err = f.WriteToIndent(fw, indent); err != nil {
+ fw.Close()
+ return err
+ }
+ fw.Close()
+
+ // Remove old file and rename the new one.
+ os.Remove(filename)
+ return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/parser.go b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go
new file mode 100644
index 0000000..1c1bf91
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go
@@ -0,0 +1,312 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type tokenType int
+
+const (
+ _TOKEN_INVALID tokenType = iota
+ _TOKEN_COMMENT
+ _TOKEN_SECTION
+ _TOKEN_KEY
+)
+
+type parser struct {
+ buf *bufio.Reader
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func newParser(r io.Reader) *parser {
+ return &parser{
+ buf: bufio.NewReader(r),
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM skips the UTF-8 byte order mark header, if present.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+ p.buf.Read(mask)
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && string(line[0:3]) == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Extract the key name.
+ endIdx := -1
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], "=:")
+ if i < 0 {
+ return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, "=:")
+ if endIdx < 0 {
+ return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
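+
+// As a sketch, input using continuation lines (illustrative only):
+//
+//	key = part1\
+//	      part2
+//
+// is read as the single value "part1part2".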
+
+// hasSurroundedQuote reports whether the first and last characters of in
+// are the given quote character (\" or \').
+// It returns false if the same kind of quote also appears in between.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte) (string, error) {
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && string(line[0:3]) == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ // This point is unreachable if the value contains only whitespace.
+ line = strings.TrimSpace(line)
+
+ // Check continuation lines
+ if line[len(line)-1] == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ i := strings.IndexAny(line, "#;")
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ // Trim single quotes
+ if hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"') {
+ line = line[1 : len(line)-1]
+ }
+ return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader)
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ section, _ := f.NewSection(DEFAULT_SECTION)
+
+ var line []byte
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+ // Note: we keep the trailing line break here because it is
+ // needed when a second comment line is appended; it is trimmed
+ // once at the end when the comment is set as a value.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ closeIdx := bytes.IndexByte(line, ']')
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ section, err = f.NewSection(string(line[1:closeIdx]))
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+ // Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+ continue
+ }
+
+ kname, offset, err := readKeyName(line)
+ if err != nil {
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ key, err := section.NewKey(kname, "")
+ if err != nil {
+ return err
+ }
+ key.isAutoIncr = isAutoIncr
+
+ value, err := p.readValue(line[offset:])
+ if err != nil {
+ return err
+ }
+ key.SetValue(value)
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/struct.go b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go
new file mode 100644
index 0000000..3fb92c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go
@@ -0,0 +1,351 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= ('A' - 'a')
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
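+
+// For illustration, the built-in mappers behave as follows:
+//
+//	AllCapsUnderscore("MyField") // "MY_FIELD"
+//	TitleUnderscore("MyField")   // "my_field"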
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setWithProperType sets a properly typed value on field based on its type.
+// It deliberately does not return an error when parsing fails, so that the
+// default value already assigned to the struct is kept.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ if len(key.String()) == 0 {
+ return nil
+ }
+ field.SetString(key.String())
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return nil
+ }
+ field.SetBool(boolVal)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil || intVal == 0 {
+ return nil
+ }
+ field.SetInt(intVal)
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ if err == nil {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return nil
+ }
+ field.SetUint(uintVal)
+
+ case reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return nil
+ }
+ field.SetFloat(floatVal)
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return nil
+ }
+ field.Set(reflect.ValueOf(timeVal))
+ case reflect.Slice:
+ vals := key.Strings(delim)
+ numVals := len(vals)
+ if numVals == 0 {
+ return nil
+ }
+
+ sliceOf := field.Type().Elem().Kind()
+
+ var times []time.Time
+ if sliceOf == reflectTime {
+ times = key.Times(delim)
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(times[i]))
+ default:
+ slice.Index(i).Set(reflect.ValueOf(vals[i]))
+ }
+ }
+ field.Set(slice)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func (s *Section) mapTo(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, tag)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ if isAnonymous {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if isAnonymous || isStruct {
+ if sec, err := s.f.GetSection(fieldName); err == nil {
+ if err = sec.mapTo(field); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// MapTo maps the section to the given struct.
+func (s *Section) MapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val)
+}
+
+// MapTo maps the file to the given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to the given struct with a name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
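+
+// A minimal sketch of MapTo (the file name and tags are assumptions for
+// illustration only):
+//
+//	type Config struct {
+//		Name string `ini:"name"`
+//		Port int    `ini:"port"`
+//	}
+//	var c Config
+//	err := MapTo(&c, "my.ini")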
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float64,
+ reflectTime:
+ key.SetValue(fmt.Sprint(field))
+ case reflect.Slice:
+ vals := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ isTime := fmt.Sprint(field.Type()) == "[]time.Time"
+ for i := 0; i < field.Len(); i++ {
+ if isTime {
+ buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ } else {
+ buf.WriteString(fmt.Sprint(vals.Index(i)))
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-1])
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, tag)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ (tpField.Type.Kind() == reflect.Struct) {
+ // Note: The only possible error here is that the section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+
+ // Note: Same reasoning as for the section above.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+ if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects a section from the given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot reflect from non-pointer struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects the file from the given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
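+
+// Conversely, a sketch of ReflectFrom (assuming cfg is a *File, for example
+// from Load, and c is a struct as in the MapTo sketch above):
+//
+//	err := ReflectFrom(cfg, &c)
+//	// cfg can then be written out with cfg.SaveTo("my.ini")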
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/godbus/dbus/CONTRIBUTING.md
new file mode 100644
index 0000000..c88f9b2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# How to Contribute
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.markdown) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>
+```
+
+The first line is the subject and should be no longer than 70 characters, the
+second line is always blank, and other lines should be wrapped at 80 characters.
+This allows the message to be easier to read on GitHub as well as in various
+git tools.
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE b/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE
new file mode 100644
index 0000000..670d88f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013, Georg Reinke (), Google
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/MAINTAINERS b/Godeps/_workspace/src/github.com/godbus/dbus/MAINTAINERS
new file mode 100644
index 0000000..e8968ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/MAINTAINERS
@@ -0,0 +1,2 @@
+Brandon Philips (@philips)
+Brian Waldon (@bcwaldon)
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/README.markdown b/Godeps/_workspace/src/github.com/godbus/dbus/README.markdown
new file mode 100644
index 0000000..0a6e7e5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/README.markdown
@@ -0,0 +1,41 @@
+dbus
+----
+
+dbus is a simple library that implements native Go client bindings for the
+D-Bus message bus system.
+
+### Features
+
+* Complete native implementation of the D-Bus message protocol
+* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
+* Subpackages that help with the introspection / property interfaces
+
+### Installation
+
+This package requires Go 1.1. If you have installed it and set up your GOPATH, just run:
+
+```
+go get github.com/godbus/dbus
+```
+
+If you want to use the subpackages, you can install them the same way.
+
+### Usage
+
+The complete package documentation and some simple examples are available at
+[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
+[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
+gives a short overview of the basic usage.
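+
+As a minimal, illustrative sketch (it assumes a running session bus), the
+following lists the names currently owned on the bus:
+
+```
+conn, err := dbus.SessionBus()
+if err != nil {
+	panic(err)
+}
+var names []string
+err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
+```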
+
+#### Projects using godbus
+- [notify](https://github.com/esiqveland/notify) provides a library for desktop notifications over dbus.
+
+Please note that the API is considered unstable for now and may change without
+further notice.
+
+### License
+
+go.dbus is available under the Simplified BSD License; see LICENSE for the full
+text.
+
+Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/auth.go b/Godeps/_workspace/src/github.com/godbus/dbus/auth.go
new file mode 100644
index 0000000..98017b6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/auth.go
@@ -0,0 +1,253 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+)
+
+// AuthStatus represents the Status of an authentication mechanism.
+type AuthStatus byte
+
+const (
+ // AuthOk signals that authentication is finished; the next command
+ // from the server should be an OK.
+ AuthOk AuthStatus = iota
+
+ // AuthContinue signals that additional data is needed; the next command
+ // from the server should be a DATA.
+ AuthContinue
+
+ // AuthError signals an error; the server sent invalid data or some
+ // other unexpected thing happened and the current authentication
+ // process should be aborted.
+ AuthError
+)
+
+type authState byte
+
+const (
+ waitingForData authState = iota
+ waitingForOk
+ waitingForReject
+)
+
+// Auth defines the behaviour of an authentication mechanism.
+type Auth interface {
+ // Return the name of the mechanism, the argument to the first AUTH command
+ // and the next status.
+ FirstData() (name, resp []byte, status AuthStatus)
+
+ // Process the given DATA command, and return the argument to the DATA
+ // command and the next status. If len(resp) == 0, no DATA command is sent.
+ HandleData(data []byte) (resp []byte, status AuthStatus)
+}
+
+// Auth authenticates the connection, trying the given list of authentication
+// mechanisms (in that order). If nil is passed, the EXTERNAL and
+// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
+// connections, this method must be called before sending any messages to the
+// bus. Auth must not be called on shared connections.
+func (conn *Conn) Auth(methods []Auth) error {
+ if methods == nil {
+ uid := strconv.Itoa(os.Getuid())
+ methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
+ }
+ in := bufio.NewReader(conn.transport)
+ err := conn.transport.SendNullByte()
+ if err != nil {
+ return err
+ }
+ err = authWriteLine(conn.transport, []byte("AUTH"))
+ if err != nil {
+ return err
+ }
+ s, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
+ return errors.New("dbus: authentication protocol error")
+ }
+ s = s[1:]
+ for _, v := range s {
+ for _, m := range methods {
+ if name, data, status := m.FirstData(); bytes.Equal(v, name) {
+ var ok bool
+ err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
+ if err != nil {
+ return err
+ }
+ switch status {
+ case AuthOk:
+ err, ok = conn.tryAuth(m, waitingForOk, in)
+ case AuthContinue:
+ err, ok = conn.tryAuth(m, waitingForData, in)
+ default:
+ panic("dbus: invalid authentication status")
+ }
+ if err != nil {
+ return err
+ }
+ if ok {
+ if conn.transport.SupportsUnixFDs() {
+ err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
+ if err != nil {
+ return err
+ }
+ line, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
+ conn.EnableUnixFDs()
+ conn.unixFD = true
+ case bytes.Equal(line[0], []byte("ERROR")):
+ default:
+ return errors.New("dbus: authentication protocol error")
+ }
+ }
+ err = authWriteLine(conn.transport, []byte("BEGIN"))
+ if err != nil {
+ return err
+ }
+ go conn.inWorker()
+ go conn.outWorker()
+ return nil
+ }
+ }
+ }
+ }
+ return errors.New("dbus: authentication failed")
+}
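+
+// Illustrative flow for a private connection (a sketch, not a complete
+// program; error checks elided):
+//
+//	conn, err := Dial("unix:path=/var/run/dbus/system_bus_socket")
+//	// check err, then authenticate with the default mechanisms:
+//	err = conn.Auth(nil)
+//	// and finally register with the bus:
+//	err = conn.Hello()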
+
+// tryAuth tries to authenticate with m as the mechanism, using state as the
+// initial authState and in for reading input. It returns (nil, true) on
+// success, (nil, false) on a REJECTED and (someErr, false) if some other
+// error occurred.
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
+ for {
+ s, err := authReadLine(in)
+ if err != nil {
+ return err, false
+ }
+ switch {
+ case state == waitingForData && string(s[0]) == "DATA":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ continue
+ }
+ data, status := m.HandleData(s[1])
+ switch status {
+ case AuthOk, AuthContinue:
+ if len(data) != 0 {
+ err = authWriteLine(conn.transport, []byte("DATA"), data)
+ if err != nil {
+ return err, false
+ }
+ }
+ if status == AuthOk {
+ state = waitingForOk
+ }
+ case AuthError:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ }
+ case state == waitingForData && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForData && string(s[0]) == "ERROR":
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForData && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForData:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForOk && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForOk && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForOk && (string(s[0]) == "DATA" ||
+ string(s[0]) == "ERROR"):
+
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForOk:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForReject && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForReject:
+ return errors.New("dbus: authentication protocol error"), false
+ default:
+ panic("dbus: invalid auth state")
+ }
+ }
+}
+
+// authReadLine reads a line and separates it into its fields.
+func authReadLine(in *bufio.Reader) ([][]byte, error) {
+ data, err := in.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+ data = bytes.TrimSuffix(data, []byte("\r\n"))
+ return bytes.Split(data, []byte{' '}), nil
+}
+
+// authWriteLine writes the given line in the authentication protocol format
+// (elements of data separated by a " " and terminated by "\r\n").
+func authWriteLine(out io.Writer, data ...[]byte) error {
+ buf := make([]byte, 0)
+ for i, v := range data {
+ buf = append(buf, v...)
+ if i != len(data)-1 {
+ buf = append(buf, ' ')
+ }
+ }
+ buf = append(buf, '\r')
+ buf = append(buf, '\n')
+ n, err := out.Write(buf)
+ if err != nil {
+ return err
+ }
+ if n != len(buf) {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/auth_external.go b/Godeps/_workspace/src/github.com/godbus/dbus/auth_external.go
new file mode 100644
index 0000000..7e376d3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/auth_external.go
@@ -0,0 +1,26 @@
+package dbus
+
+import (
+ "encoding/hex"
+)
+
+// AuthExternal returns an Auth that authenticates as the given user with the
+// EXTERNAL mechanism.
+func AuthExternal(user string) Auth {
+ return authExternal{user}
+}
+
+// AuthExternal implements the EXTERNAL authentication mechanism.
+type authExternal struct {
+ user string
+}
+
+func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("EXTERNAL"), b, AuthOk
+}
+
+func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
+ return nil, AuthError
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/auth_sha1.go b/Godeps/_workspace/src/github.com/godbus/dbus/auth_sha1.go
new file mode 100644
index 0000000..df15b46
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/auth_sha1.go
@@ -0,0 +1,102 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/hex"
+ "os"
+)
+
+// AuthCookieSha1 returns an Auth that authenticates as the given user with the
+// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
+// directory of the user.
+func AuthCookieSha1(user, home string) Auth {
+ return authCookieSha1{user, home}
+}
+
+type authCookieSha1 struct {
+ user, home string
+}
+
+func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
+}
+
+func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
+ challenge := make([]byte, len(data)/2)
+ _, err := hex.Decode(challenge, data)
+ if err != nil {
+ return nil, AuthError
+ }
+ b := bytes.Split(challenge, []byte{' '})
+ if len(b) != 3 {
+ return nil, AuthError
+ }
+ context := b[0]
+ id := b[1]
+ svchallenge := b[2]
+ cookie := a.getCookie(context, id)
+ if cookie == nil {
+ return nil, AuthError
+ }
+ clchallenge := a.generateChallenge()
+ if clchallenge == nil {
+ return nil, AuthError
+ }
+ hash := sha1.New()
+ hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
+ hexhash := make([]byte, 2*hash.Size())
+ hex.Encode(hexhash, hash.Sum(nil))
+ data = append(clchallenge, ' ')
+ data = append(data, hexhash...)
+ resp := make([]byte, 2*len(data))
+ hex.Encode(resp, data)
+ return resp, AuthOk
+}
+
+// getCookie searches for the cookie identified by id in context and returns
+// the cookie content or nil. (Since HandleData can't return a specific error,
+// but only whether an error occurred, this function also doesn't bother to
+// return an error.)
+func (a authCookieSha1) getCookie(context, id []byte) []byte {
+ file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
+ if err != nil {
+ return nil
+ }
+ defer file.Close()
+ rd := bufio.NewReader(file)
+ for {
+ line, err := rd.ReadBytes('\n')
+ if err != nil {
+ return nil
+ }
+ line = line[:len(line)-1]
+ b := bytes.Split(line, []byte{' '})
+ if len(b) != 3 {
+ return nil
+ }
+ if bytes.Equal(b[0], id) {
+ return b[2]
+ }
+ }
+}
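+
+// A keyring file holds one cookie per line; each line has the form
+// (illustrative):
+//
+//	<id> <creation-time> <cookie>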
+
+// generateChallenge returns a random, hex-encoded challenge, or nil on error
+// (see above).
+func (a authCookieSha1) generateChallenge() []byte {
+ b := make([]byte, 16)
+ n, err := rand.Read(b)
+ if err != nil {
+ return nil
+ }
+ if n != 16 {
+ return nil
+ }
+ enc := make([]byte, 32)
+ hex.Encode(enc, b)
+ return enc
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/call.go b/Godeps/_workspace/src/github.com/godbus/dbus/call.go
new file mode 100644
index 0000000..ba6e73f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/call.go
@@ -0,0 +1,36 @@
+package dbus
+
+import (
+ "errors"
+)
+
+// Call represents a pending or completed method call.
+type Call struct {
+ Destination string
+ Path ObjectPath
+ Method string
+ Args []interface{}
+
+ // Strobes when the call is complete.
+ Done chan *Call
+
+ // After completion, the error status. If this is non-nil, it may be an
+ // error message from the peer (with Error as its type) or some other error.
+ Err error
+
+ // Holds the response once the call is done.
+ Body []interface{}
+}
+
+var errSignature = errors.New("dbus: mismatched signature")
+
+// Store stores the body of the reply into the provided pointers. It returns
+// an error if the signatures of the body and retvalues don't match, or if
+// the error status is not nil.
+func (c *Call) Store(retvalues ...interface{}) error {
+ if c.Err != nil {
+ return c.Err
+ }
+
+ return Store(c.Body, retvalues...)
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/conn.go b/Godeps/_workspace/src/github.com/godbus/dbus/conn.go
new file mode 100644
index 0000000..a4f5394
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/conn.go
@@ -0,0 +1,625 @@
+package dbus
+
+import (
+ "errors"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
+
+var (
+ systemBus *Conn
+ systemBusLck sync.Mutex
+ sessionBus *Conn
+ sessionBusLck sync.Mutex
+)
+
+// ErrClosed is the error returned by calls on a closed connection.
+var ErrClosed = errors.New("dbus: connection closed by user")
+
+// Conn represents a connection to a message bus (usually, the system or
+// session bus).
+//
+// Connections are either shared or private. Shared connections
+// are shared between calls to the functions that return them. As a result,
+// the methods Close, Auth and Hello must not be called on them.
+//
+// Multiple goroutines may invoke methods on a connection simultaneously.
+type Conn struct {
+ transport
+
+ busObj BusObject
+ unixFD bool
+ uuid string
+
+ names []string
+ namesLck sync.RWMutex
+
+ serialLck sync.Mutex
+ nextSerial uint32
+ serialUsed map[uint32]bool
+
+ calls map[uint32]*Call
+ callsLck sync.RWMutex
+
+ handlers map[ObjectPath]map[string]exportWithMapping
+ handlersLck sync.RWMutex
+
+ out chan *Message
+ closed bool
+ outLck sync.RWMutex
+
+ signals []chan<- *Signal
+ signalsLck sync.Mutex
+
+ eavesdropped chan<- *Message
+ eavesdroppedLck sync.Mutex
+}
+
+// SessionBus returns a shared connection to the session bus, connecting to it
+// if not already done.
+func SessionBus() (conn *Conn, err error) {
+ sessionBusLck.Lock()
+ defer sessionBusLck.Unlock()
+ if sessionBus != nil {
+ return sessionBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ sessionBus = conn
+ }
+ }()
+ conn, err = SessionBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SessionBusPrivate returns a new private connection to the session bus.
+func SessionBusPrivate() (*Conn, error) {
+ address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+ if address != "" && address != "autolaunch:" {
+ return Dial(address)
+ }
+
+ return sessionBusPlatform()
+}
+
+// SystemBus returns a shared connection to the system bus, connecting to it if
+// not already done.
+func SystemBus() (conn *Conn, err error) {
+ systemBusLck.Lock()
+ defer systemBusLck.Unlock()
+ if systemBus != nil {
+ return systemBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ systemBus = conn
+ }
+ }()
+ conn, err = SystemBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SystemBusPrivate returns a new private connection to the system bus.
+func SystemBusPrivate() (*Conn, error) {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return Dial(address)
+ }
+ return Dial(defaultSystemBusAddress)
+}
+
+// Dial establishes a new private connection to the message bus specified by address.
+func Dial(address string) (*Conn, error) {
+ tr, err := getTransport(address)
+ if err != nil {
+ return nil, err
+ }
+ return newConn(tr)
+}
+
+// NewConn creates a new private *Conn from an already established connection.
+func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
+ return newConn(genericTransport{conn})
+}
+
+// newConn creates a new *Conn from a transport.
+func newConn(tr transport) (*Conn, error) {
+ conn := new(Conn)
+ conn.transport = tr
+ conn.calls = make(map[uint32]*Call)
+ conn.out = make(chan *Message, 10)
+ conn.handlers = make(map[ObjectPath]map[string]exportWithMapping)
+ conn.nextSerial = 1
+ conn.serialUsed = map[uint32]bool{0: true}
+ conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
+ return conn, nil
+}
+
+// BusObject returns the object owned by the bus daemon which handles
+// administrative requests.
+func (conn *Conn) BusObject() BusObject {
+ return conn.busObj
+}
+
+// Close closes the connection. Any blocked operations will return with errors
+// and the channels passed to Eavesdrop and Signal are closed. This method must
+// not be called on shared connections.
+func (conn *Conn) Close() error {
+ conn.outLck.Lock()
+ if conn.closed {
+ // inWorker calls Close on read error, the read error may
+ // be caused by another caller calling Close to shutdown the
+ // dbus connection, a double-close scenario we prevent here.
+ conn.outLck.Unlock()
+ return nil
+ }
+ close(conn.out)
+ conn.closed = true
+ conn.outLck.Unlock()
+ conn.signalsLck.Lock()
+ for _, ch := range conn.signals {
+ close(ch)
+ }
+ conn.signalsLck.Unlock()
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ close(conn.eavesdropped)
+ }
+ conn.eavesdroppedLck.Unlock()
+ return conn.transport.Close()
+}
+
+// Eavesdrop causes conn to send all incoming messages to the given channel
+// without further processing. Method replies, errors and signals will not be
+// sent to the appropriate channels and method calls will not be handled. If nil
+// is passed, the normal behaviour is restored.
+//
+// The caller has to make sure that ch is sufficiently buffered;
+// if a message arrives when a write to ch is not possible, the message is
+// discarded.
+func (conn *Conn) Eavesdrop(ch chan<- *Message) {
+ conn.eavesdroppedLck.Lock()
+ conn.eavesdropped = ch
+ conn.eavesdroppedLck.Unlock()
+}
+
+// getSerial returns an unused serial.
+func (conn *Conn) getSerial() uint32 {
+ conn.serialLck.Lock()
+ defer conn.serialLck.Unlock()
+ n := conn.nextSerial
+ for conn.serialUsed[n] {
+ n++
+ }
+ conn.serialUsed[n] = true
+ conn.nextSerial = n + 1
+ return n
+}
+
+// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
+// called after authentication, but before sending any other messages to the
+// bus. Hello must not be called for shared connections.
+func (conn *Conn) Hello() error {
+ var s string
+ err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
+ if err != nil {
+ return err
+ }
+ conn.namesLck.Lock()
+ conn.names = make([]string, 1)
+ conn.names[0] = s
+ conn.namesLck.Unlock()
+ return nil
+}
+
+// inWorker runs in its own goroutine, reading incoming messages from the
+// transport and dispatching them appropriately.
+func (conn *Conn) inWorker() {
+ for {
+ msg, err := conn.ReadMessage()
+ if err == nil {
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ select {
+ case conn.eavesdropped <- msg:
+ default:
+ }
+ conn.eavesdroppedLck.Unlock()
+ continue
+ }
+ conn.eavesdroppedLck.Unlock()
+ dest, _ := msg.Headers[FieldDestination].value.(string)
+ found := false
+ if dest == "" {
+ found = true
+ } else {
+ conn.namesLck.RLock()
+ if len(conn.names) == 0 {
+ found = true
+ }
+ for _, v := range conn.names {
+ if dest == v {
+ found = true
+ break
+ }
+ }
+ conn.namesLck.RUnlock()
+ }
+ if !found {
+ // Eavesdropped a message, but no channel for it is registered.
+ // Ignore it.
+ continue
+ }
+ switch msg.Type {
+ case TypeMethodReply, TypeError:
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ conn.callsLck.Lock()
+ if c, ok := conn.calls[serial]; ok {
+ if msg.Type == TypeError {
+ name, _ := msg.Headers[FieldErrorName].value.(string)
+ c.Err = Error{name, msg.Body}
+ } else {
+ c.Body = msg.Body
+ }
+ c.Done <- c
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, serial)
+ conn.serialLck.Unlock()
+ delete(conn.calls, serial)
+ }
+ conn.callsLck.Unlock()
+ case TypeSignal:
+ iface := msg.Headers[FieldInterface].value.(string)
+ member := msg.Headers[FieldMember].value.(string)
+ // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+ // sender is optional for signals.
+ sender, _ := msg.Headers[FieldSender].value.(string)
+ if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
+ if member == "NameLost" {
+ // If we lost the name on the bus, remove it from our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the lost name")
+ }
+ conn.namesLck.Lock()
+ for i, v := range conn.names {
+ if v == name {
+ conn.names = append(conn.names[:i],
+ conn.names[i+1:]...)
+ }
+ }
+ conn.namesLck.Unlock()
+ } else if member == "NameAcquired" {
+ // If we acquired the name on the bus, add it to our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the acquired name")
+ }
+ conn.namesLck.Lock()
+ conn.names = append(conn.names, name)
+ conn.namesLck.Unlock()
+ }
+ }
+ signal := &Signal{
+ Sender: sender,
+ Path: msg.Headers[FieldPath].value.(ObjectPath),
+ Name: iface + "." + member,
+ Body: msg.Body,
+ }
+ conn.signalsLck.Lock()
+ for _, ch := range conn.signals {
+ ch <- signal
+ }
+ conn.signalsLck.Unlock()
+ case TypeMethodCall:
+ go conn.handleCall(msg)
+ }
+ } else if _, ok := err.(InvalidMessageError); !ok {
+ // Some read error occurred (usually EOF); we can't really do
+ // anything but shut everything down and return errors to all
+ // pending replies.
+ conn.Close()
+ conn.callsLck.RLock()
+ for _, v := range conn.calls {
+ v.Err = err
+ v.Done <- v
+ }
+ conn.callsLck.RUnlock()
+ return
+ }
+ // invalid messages are ignored
+ }
+}
+
+// Names returns the list of all names that are currently owned by this
+// connection. The slice is always at least one element long, the first element
+// being the unique name of the connection.
+func (conn *Conn) Names() []string {
+ conn.namesLck.RLock()
+ // copy the slice so it can't be modified
+ s := make([]string, len(conn.names))
+ copy(s, conn.names)
+ conn.namesLck.RUnlock()
+ return s
+}
+
+// Object returns the object identified by the given destination name and path.
+func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
+ return &Object{conn, dest, path}
+}
+
+// outWorker runs in its own goroutine, encoding and sending messages that are
+// sent to conn.out.
+func (conn *Conn) outWorker() {
+ for msg := range conn.out {
+ err := conn.SendMessage(msg)
+ conn.callsLck.RLock()
+ if err != nil {
+ if c := conn.calls[msg.serial]; c != nil {
+ c.Err = err
+ c.Done <- c
+ }
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ } else if msg.Type != TypeMethodCall {
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ }
+ conn.callsLck.RUnlock()
+ }
+}
+
+// Send sends the given message to the message bus. You usually don't need to
+// use this; use the higher-level equivalents (Call / Go, Emit and Export)
+// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
+// call is returned and the same value is sent to ch (which must be buffered)
+// once the call is complete. Otherwise, ch is ignored and a Call structure is
+// returned of which only the Err member is valid.
+func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
+ var call *Call
+
+ msg.serial = conn.getSerial()
+ if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 5)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Conn).Send")
+ }
+ call = new(Call)
+ call.Destination, _ = msg.Headers[FieldDestination].value.(string)
+ call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
+ iface, _ := msg.Headers[FieldInterface].value.(string)
+ member, _ := msg.Headers[FieldMember].value.(string)
+ call.Method = iface + "." + member
+ call.Args = msg.Body
+ call.Done = ch
+ conn.callsLck.Lock()
+ conn.calls[msg.serial] = call
+ conn.callsLck.Unlock()
+ conn.outLck.RLock()
+ if conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+ } else {
+ conn.outLck.RLock()
+ if conn.closed {
+ call = &Call{Err: ErrClosed}
+ } else {
+ conn.out <- msg
+ call = &Call{Err: nil}
+ }
+ conn.outLck.RUnlock()
+ }
+ return call
+}
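+
+// In most programs the higher-level call path is used instead of Send; as a
+// sketch (using BusObject's Call method, as in Hello above):
+//
+//	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
+//	call := obj.Call("org.freedesktop.DBus.ListNames", 0)
+//	// call.Err and call.Body hold the outcome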
+
+// sendError creates an error message corresponding to the parameters and sends
+// it to conn.out.
+func (conn *Conn) sendError(e Error, dest string, serial uint32) {
+ msg := new(Message)
+ msg.Type = TypeError
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldErrorName] = MakeVariant(e.Name)
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = e.Body
+ if len(e.Body) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+// sendReply creates a method reply message corresponding to the parameters and
+// sends it to conn.out.
+func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
+ msg := new(Message)
+ msg.Type = TypeMethodReply
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+// Signal registers the given channel to be passed all received signal messages.
+// The caller has to make sure that ch is sufficiently buffered; if a message
+// arrives when a write to ch is not possible, it is discarded.
+//
+// Multiple of these channels can be registered at the same time. Passing a
+// channel that already is registered will remove it from the list of the
+// registered channels.
+//
+// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
+// channel for eavesdropped messages, this channel receives all signals, and
+// none of the channels passed to Signal will receive any signals.
+func (conn *Conn) Signal(ch chan<- *Signal) {
+ conn.signalsLck.Lock()
+ conn.signals = append(conn.signals, ch)
+ conn.signalsLck.Unlock()
+}
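+
+// Illustrative usage (the buffer size is an arbitrary choice):
+//
+//	ch := make(chan *Signal, 16)
+//	conn.Signal(ch)
+//	for sig := range ch {
+//		// sig.Name, sig.Path and sig.Body describe the signal
+//	}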
+
+// SupportsUnixFDs returns whether the underlying transport supports passing of
+// unix file descriptors. If this is false, method calls containing unix file
+// descriptors will return an error and emitted signals containing them will
+// not be sent.
+func (conn *Conn) SupportsUnixFDs() bool {
+ return conn.unixFD
+}
+
+// Error represents a D-Bus message of type Error.
+type Error struct {
+ Name string
+ Body []interface{}
+}
+
+func NewError(name string, body []interface{}) *Error {
+ return &Error{name, body}
+}
+
+func (e Error) Error() string {
+ if len(e.Body) >= 1 {
+ s, ok := e.Body[0].(string)
+ if ok {
+ return s
+ }
+ }
+ return e.Name
+}
+
+// Signal represents a D-Bus message of type Signal. The name member is given in
+// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
+type Signal struct {
+ Sender string
+ Path ObjectPath
+ Name string
+ Body []interface{}
+}
+
+// transport is a D-Bus transport.
+type transport interface {
+ // Read and Write raw data (for example, for the authentication protocol).
+ io.ReadWriteCloser
+
+ // Send the initial null byte used for the EXTERNAL mechanism.
+ SendNullByte() error
+
+ // Returns whether this transport supports passing Unix FDs.
+ SupportsUnixFDs() bool
+
+ // Signal the transport that Unix FD passing is enabled for this connection.
+ EnableUnixFDs()
+
+ // Read / send a message, handling things like Unix FDs.
+ ReadMessage() (*Message, error)
+ SendMessage(*Message) error
+}
+
+var (
+ transports = make(map[string]func(string) (transport, error))
+)
+
+func getTransport(address string) (transport, error) {
+ var err error
+ var t transport
+
+ addresses := strings.Split(address, ";")
+ for _, v := range addresses {
+ i := strings.IndexRune(v, ':')
+ if i == -1 {
+ err = errors.New("dbus: invalid bus address (no transport)")
+ continue
+ }
+ f := transports[v[:i]]
+ if f == nil {
+ err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
+ continue
+ }
+ t, err = f(v[i+1:])
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, err
+}
+
+// dereferenceAll returns a slice that, assuming vs is a slice of pointers
+// of arbitrary types, contains the values obtained by dereferencing
+// all elements in vs.
+func dereferenceAll(vs []interface{}) []interface{} {
+ for i := range vs {
+ v := reflect.ValueOf(vs[i])
+ v = v.Elem()
+ vs[i] = v.Interface()
+ }
+ return vs
+}
+
+// getKey gets the value for key from a comma-separated list of key=value
+// pairs. It returns "" on error or if the key is not found.
+func getKey(s, key string) string {
+ i := strings.Index(s, key)
+ if i == -1 {
+ return ""
+ }
+ if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
+ return ""
+ }
+ // Look for the terminating comma starting at the value, not at the
+ // beginning of s; a comma before the key would otherwise yield an
+ // invalid slice range.
+ j := strings.Index(s[i+len(key)+1:], ",")
+ if j == -1 {
+ j = len(s)
+ } else {
+ j += i + len(key) + 1
+ }
+ return s[i+len(key)+1 : j]
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/conn_darwin.go b/Godeps/_workspace/src/github.com/godbus/dbus/conn_darwin.go
new file mode 100644
index 0000000..b67bb1b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/conn_darwin.go
@@ -0,0 +1,21 @@
+package dbus
+
+import (
+ "errors"
+ "os/exec"
+)
+
+func sessionBusPlatform() (*Conn, error) {
+ cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ return nil, errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return Dial("unix:path=" + string(b[:len(b)-1]))
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go b/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go
new file mode 100644
index 0000000..f74b875
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go
@@ -0,0 +1,27 @@
+// +build !darwin
+
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "os/exec"
+)
+
+func sessionBusPlatform() (*Conn, error) {
+ cmd := exec.Command("dbus-launch")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return nil, err
+ }
+
+ i := bytes.IndexByte(b, '=')
+ j := bytes.IndexByte(b, '\n')
+
+ if i == -1 || j == -1 {
+ return nil, errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return Dial(string(b[i+1 : j]))
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/dbus.go b/Godeps/_workspace/src/github.com/godbus/dbus/dbus.go
new file mode 100644
index 0000000..2ce6873
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/dbus.go
@@ -0,0 +1,258 @@
+package dbus
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+)
+
+var (
+ byteType = reflect.TypeOf(byte(0))
+ boolType = reflect.TypeOf(false)
+ uint8Type = reflect.TypeOf(uint8(0))
+ int16Type = reflect.TypeOf(int16(0))
+ uint16Type = reflect.TypeOf(uint16(0))
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf("")
+ signatureType = reflect.TypeOf(Signature{""})
+ objectPathType = reflect.TypeOf(ObjectPath(""))
+ variantType = reflect.TypeOf(Variant{Signature{""}, nil})
+ interfacesType = reflect.TypeOf([]interface{}{})
+ unixFDType = reflect.TypeOf(UnixFD(0))
+ unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
+)
+
+// An InvalidTypeError signals that a value which cannot be represented in the
+// D-Bus wire format was passed to a function.
+type InvalidTypeError struct {
+ Type reflect.Type
+}
+
+func (e InvalidTypeError) Error() string {
+ return "dbus: invalid type " + e.Type.String()
+}
+
+// Store copies the values contained in src to dest, which must be a slice of
+// pointers. It converts slices of interfaces from src to corresponding structs
+// in dest. An error is returned if the lengths of src and dest or the types of
+// their elements don't match.
+func Store(src []interface{}, dest ...interface{}) error {
+ if len(src) != len(dest) {
+ return errors.New("dbus.Store: length mismatch")
+ }
+
+ for i := range src {
+ if err := store(src[i], dest[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
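+
+// A sketch of Store with a reply body containing a string and a uint32:
+//
+//	var s string
+//	var u uint32
+//	err := Store([]interface{}{"name", uint32(7)}, &s, &u)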
+
+func store(src, dest interface{}) error {
+ if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
+ reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
+ return nil
+ } else if hasStruct(dest) {
+ rv := reflect.ValueOf(dest).Elem()
+ switch rv.Kind() {
+ case reflect.Struct:
+ vs, ok := src.([]interface{})
+ if !ok {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ t := rv.Type()
+ ndest := make([]interface{}, 0, rv.NumField())
+ for i := 0; i < rv.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ ndest = append(ndest, rv.Field(i).Addr().Interface())
+ }
+ }
+ if len(vs) != len(ndest) {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ err := Store(vs, ndest...)
+ if err != nil {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ case reflect.Slice:
+ sv := reflect.ValueOf(src)
+ if sv.Kind() != reflect.Slice {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
+ for i := 0; i < sv.Len(); i++ {
+ if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
+ return err
+ }
+ }
+ case reflect.Map:
+ sv := reflect.ValueOf(src)
+ if sv.Kind() != reflect.Map {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ keys := sv.MapKeys()
+ rv.Set(reflect.MakeMap(rv.Type()))
+ for _, key := range keys {
+ // Allocate an element of the destination's value type and let store
+ // convert the source value into it (store's arguments are (src, dest)).
+ v := reflect.New(rv.Type().Elem())
+ if err := store(sv.MapIndex(key).Interface(), v.Interface()); err != nil {
+ return err
+ }
+ rv.SetMapIndex(key, v.Elem())
+ }
+ default:
+ return errors.New("dbus.Store: type mismatch")
+ }
+ return nil
+ } else {
+ return errors.New("dbus.Store: type mismatch")
+ }
+}
+
+func hasStruct(v interface{}) bool {
+ t := reflect.TypeOf(v)
+ for {
+ switch t.Kind() {
+ case reflect.Struct:
+ return true
+ case reflect.Slice, reflect.Ptr, reflect.Map:
+ t = t.Elem()
+ default:
+ return false
+ }
+ }
+}
+
+// An ObjectPath is an object path as defined by the D-Bus spec.
+type ObjectPath string
+
+// IsValid returns whether the object path is valid.
+func (o ObjectPath) IsValid() bool {
+ s := string(o)
+ if len(s) == 0 {
+ return false
+ }
+ if s[0] != '/' {
+ return false
+ }
+ if s[len(s)-1] == '/' && len(s) != 1 {
+ return false
+ }
+ // probably not used, but technically possible
+ if s == "/" {
+ return true
+ }
+ split := strings.Split(s[1:], "/")
+ for _, v := range split {
+ if len(v) == 0 {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
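+
+// For example (per the rules above):
+//
+//	ObjectPath("/org/freedesktop/DBus").IsValid() // true
+//	ObjectPath("/org//freedesktop").IsValid()     // false: empty element
+//	ObjectPath("org/freedesktop").IsValid()       // false: no leading '/'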
+
+// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
+// documentation for more information about Unix file descriptor passing.
+type UnixFD int32
+
+// A UnixFDIndex is the representation of a Unix file descriptor in a message.
+type UnixFDIndex uint32
+
+// alignment returns the alignment of values of type t.
+func alignment(t reflect.Type) int {
+ switch t {
+ case variantType:
+ return 1
+ case objectPathType:
+ return 4
+ case signatureType:
+ return 1
+ case interfacesType: // sometimes used for structs
+ return 8
+ }
+ switch t.Kind() {
+ case reflect.Uint8:
+ return 1
+ case reflect.Uint16, reflect.Int16:
+ return 2
+ case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
+ return 4
+ case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
+ return 8
+ case reflect.Ptr:
+ return alignment(t.Elem())
+ }
+ return 1
+}
+
+// isKeyType returns whether t is a valid type for a D-Bus dict.
+func isKeyType(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
+ reflect.String:
+
+ return true
+ }
+ return false
+}
+
+// isValidInterface returns whether s is a valid name for an interface.
+func isValidInterface(s string) bool {
+ if len(s) == 0 || len(s) > 255 || s[0] == '.' {
+ return false
+ }
+ elem := strings.Split(s, ".")
+ if len(elem) < 2 {
+ return false
+ }
+ for _, v := range elem {
+ if len(v) == 0 {
+ return false
+ }
+ if v[0] >= '0' && v[0] <= '9' {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// isValidMember returns whether s is a valid name for a member.
+func isValidMember(s string) bool {
+ if len(s) == 0 || len(s) > 255 {
+ return false
+ }
+ i := strings.Index(s, ".")
+ if i != -1 {
+ return false
+ }
+ if s[0] >= '0' && s[0] <= '9' {
+ return false
+ }
+ for _, c := range s {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isMemberChar(c rune) bool {
+ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') || c == '_'
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/decoder.go b/Godeps/_workspace/src/github.com/godbus/dbus/decoder.go
new file mode 100644
index 0000000..ef50dca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/decoder.go
@@ -0,0 +1,228 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+type decoder struct {
+ in io.Reader
+ order binary.ByteOrder
+ pos int
+}
+
+// newDecoder returns a new decoder that reads values from in. The input is
+// expected to be in the given byte order.
+func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
+ dec := new(decoder)
+ dec.in = in
+ dec.order = order
+ return dec
+}
+
+// align aligns the input to the given boundary and panics on error.
+func (dec *decoder) align(n int) {
+ if dec.pos%n != 0 {
+ newpos := (dec.pos + n - 1) & ^(n - 1)
+ empty := make([]byte, newpos-dec.pos)
+ if _, err := io.ReadFull(dec.in, empty); err != nil {
+ panic(err)
+ }
+ dec.pos = newpos
+ }
+}
+
+// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
+func (dec *decoder) binread(v interface{}) {
+ if err := binary.Read(dec.in, dec.order, v); err != nil {
+ panic(err)
+ }
+}
+
+func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
+ defer func() {
+ var ok bool
+ v := recover()
+ if err, ok = v.(error); ok {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ err = FormatError("unexpected EOF")
+ }
+ }
+ }()
+ vs = make([]interface{}, 0)
+ s := sig.str
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ return nil, err
+ }
+ v := dec.decode(s[:len(s)-len(rem)], 0)
+ vs = append(vs, v)
+ s = rem
+ }
+ return vs, nil
+}
+
+func (dec *decoder) decode(s string, depth int) interface{} {
+ dec.align(alignment(typeFor(s)))
+ switch s[0] {
+ case 'y':
+ var b [1]byte
+ if _, err := dec.in.Read(b[:]); err != nil {
+ panic(err)
+ }
+ dec.pos++
+ return b[0]
+ case 'b':
+ i := dec.decode("u", depth).(uint32)
+ switch {
+ case i == 0:
+ return false
+ case i == 1:
+ return true
+ default:
+ panic(FormatError("invalid value for boolean"))
+ }
+ case 'n':
+ var i int16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'i':
+ var i int32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 'x':
+ var i int64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'q':
+ var i uint16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'u':
+ var i uint32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 't':
+ var i uint64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'd':
+ var f float64
+ dec.binread(&f)
+ dec.pos += 8
+ return f
+ case 's':
+ length := dec.decode("u", depth).(uint32)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ return string(b[:len(b)-1])
+ case 'o':
+ return ObjectPath(dec.decode("s", depth).(string))
+ case 'g':
+ length := dec.decode("y", depth).(byte)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ sig, err := ParseSignature(string(b[:len(b)-1]))
+ if err != nil {
+ panic(err)
+ }
+ return sig
+ case 'v':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ var variant Variant
+ sig := dec.decode("g", depth).(Signature)
+ if len(sig.str) == 0 {
+ panic(FormatError("variant signature is empty"))
+ }
+ err, rem := validSingle(sig.str, 0)
+ if err != nil {
+ panic(err)
+ }
+ if rem != "" {
+ panic(FormatError("variant signature has multiple types"))
+ }
+ variant.sig = sig
+ variant.value = dec.decode(sig.str, depth+1)
+ return variant
+ case 'h':
+ return UnixFDIndex(dec.decode("u", depth).(uint32))
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ ksig := s[2:3]
+ vsig := s[3 : len(s)-1]
+ v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ length := dec.decode("u", depth).(uint32)
+ // Even for empty maps, the correct padding must be included
+ dec.align(8)
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ dec.align(8)
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ kv := dec.decode(ksig, depth+2)
+ vv := dec.decode(vsig, depth+2)
+ v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return v.Interface()
+ }
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ length := dec.decode("u", depth).(uint32)
+ v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
+ // Even for empty arrays, the correct padding must be included
+ dec.align(alignment(typeFor(s[1:])))
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ ev := dec.decode(s[1:], depth+1)
+ v = reflect.Append(v, reflect.ValueOf(ev))
+ }
+ return v.Interface()
+ case '(':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ dec.align(8)
+ v := make([]interface{}, 0)
+ s = s[1 : len(s)-1]
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+ ev := dec.decode(s[:len(s)-len(rem)], depth+1)
+ v = append(v, ev)
+ s = rem
+ }
+ return v
+ default:
+ panic(SignatureError{Sig: s})
+ }
+}
+
+// A FormatError is an error in the wire format.
+type FormatError string
+
+func (e FormatError) Error() string {
+ return "dbus: wire format error: " + string(e)
+}
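
The decoder's align method relies on the standard power-of-two rounding idiom (pos + n - 1) &amp; ^(n - 1). A standalone sketch of the same arithmetic, with assumed positions:

```go
package main

import "fmt"

// alignUp rounds pos up to the next multiple of n (n must be a power of two);
// it is the same expression the decoder uses to find the next aligned offset.
func alignUp(pos, n int) int {
	return (pos + n - 1) & ^(n - 1)
}

func main() {
	fmt.Println(alignUp(1, 4))  // 4: a uint32 after one byte skips 3 pad bytes
	fmt.Println(alignUp(12, 8)) // 16: a uint64 at position 12 starts at 16
	fmt.Println(alignUp(8, 8))  // 8: already-aligned positions are unchanged
}
```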
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/doc.go b/Godeps/_workspace/src/github.com/godbus/dbus/doc.go
new file mode 100644
index 0000000..deff554
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/doc.go
@@ -0,0 +1,63 @@
+/*
+Package dbus implements bindings to the D-Bus message bus system.
+
+To use the message bus API, you first need to connect to a bus (usually the
+session or system bus). The acquired connection can then be used to call methods
+on remote objects and emit or receive signals. Using the Export method, you can
+arrange for D-Bus method calls to be directly translated to method calls on a Go
+value.
+
+Conversion Rules
+
+For outgoing messages, Go types are automatically converted to the
+corresponding D-Bus types. The following types are directly encoded as their
+respective D-Bus equivalents:
+
+ Go type | D-Bus type
+ ------------+-----------
+ byte | BYTE
+ bool | BOOLEAN
+ int16 | INT16
+ uint16 | UINT16
+ int32 | INT32
+ uint32 | UINT32
+ int64 | INT64
+ uint64 | UINT64
+ float64 | DOUBLE
+ string | STRING
+ ObjectPath | OBJECT_PATH
+ Signature | SIGNATURE
+ Variant | VARIANT
+ UnixFDIndex | UNIX_FD
+
+Slices and arrays encode as ARRAYs of their element type.
+
+Maps encode as DICTs, provided that their key type can be used as a key for
+a DICT.
+
+Structs other than Variant and Signature encode as a STRUCT containing their
+exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
+be skipped.
+
+Pointers encode as the value they're pointed to.
+
+Trying to encode any other type or a slice, map or struct containing an
+unsupported type will result in an InvalidTypeError.
+
+For incoming messages, the inverse of these rules is used, with the exception
+of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
+containing the struct fields in the correct order. The Store function can be
+used to convert such values to Go structs.
+
+Unix FD passing
+
+Handling Unix file descriptors deserves special mention. To use them, you should
+first check that they are supported on a connection by calling SupportsUnixFDs.
+If it returns true, all methods on the connection will translate messages
+containing UnixFDs to messages that are accompanied by the given file
+descriptors, with the UnixFD values being substituted by the correct indices.
+Similarly, the indices of incoming messages are automatically resolved. It
+shouldn't be necessary to use UnixFDIndex.
+
+*/
+package dbus
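
As a concrete illustration of the conversion rules, here is a sketch with an invented struct type; the exported untagged fields yield a STRUCT signature, while the map and slice become DICT and ARRAY entries:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

// Song is a hypothetical payload type: exported untagged fields become STRUCT
// members, the map becomes a DICT, the slice an ARRAY, and the last two
// fields are skipped.
type Song struct {
	Title    string
	Length   uint32
	Tags     map[string]string
	Ratings  []int32
	internal int    // unexported: skipped
	Cache    string `dbus:"-"` // tagged: skipped
}

func main() {
	fmt.Println(dbus.SignatureOf(Song{})) // (sua{ss}ai)
}
```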
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/encoder.go b/Godeps/_workspace/src/github.com/godbus/dbus/encoder.go
new file mode 100644
index 0000000..9f0a9e8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/encoder.go
@@ -0,0 +1,208 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+// An encoder encodes values to the D-Bus wire format.
+type encoder struct {
+ out io.Writer
+ order binary.ByteOrder
+ pos int
+}
+
+// NewEncoder returns a new encoder that writes to out in the given byte order.
+func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
+ return newEncoderAtOffset(out, 0, order)
+}
+
+// newEncoderAtOffset returns a new encoder that writes to out in the given
+// byte order. Specify the offset to initialize pos for proper alignment
+// computation.
+func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
+ enc := new(encoder)
+ enc.out = out
+ enc.order = order
+ enc.pos = offset
+ return enc
+}
+
+// Aligns the next output to be on a multiple of n. Panics on write errors.
+func (enc *encoder) align(n int) {
+ pad := enc.padding(0, n)
+ if pad > 0 {
+ empty := make([]byte, pad)
+ if _, err := enc.out.Write(empty); err != nil {
+ panic(err)
+ }
+ enc.pos += pad
+ }
+}
+
+// padding returns the number of bytes of padding required to reach the given
+// alignment from the current position plus the given offset.
+func (enc *encoder) padding(offset, algn int) int {
+ abs := enc.pos + offset
+ if abs%algn != 0 {
+ newabs := (abs + algn - 1) & ^(algn - 1)
+ return newabs - abs
+ }
+ return 0
+}
+
+// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
+func (enc *encoder) binwrite(v interface{}) {
+ if err := binary.Write(enc.out, enc.order, v); err != nil {
+ panic(err)
+ }
+}
+
+// Encode encodes the given values to the underlying writer. All written values
+// are aligned properly as required by the D-Bus spec.
+func (enc *encoder) Encode(vs ...interface{}) (err error) {
+ defer func() {
+ err, _ = recover().(error)
+ }()
+ for _, v := range vs {
+ enc.encode(reflect.ValueOf(v), 0)
+ }
+ return nil
+}
+
+// encode encodes the given value to the writer and panics on error. depth holds
+// the depth of the container nesting.
+func (enc *encoder) encode(v reflect.Value, depth int) {
+ enc.align(alignment(v.Type()))
+ switch v.Kind() {
+ case reflect.Uint8:
+ var b [1]byte
+ b[0] = byte(v.Uint())
+ if _, err := enc.out.Write(b[:]); err != nil {
+ panic(err)
+ }
+ enc.pos++
+ case reflect.Bool:
+ if v.Bool() {
+ enc.encode(reflect.ValueOf(uint32(1)), depth)
+ } else {
+ enc.encode(reflect.ValueOf(uint32(0)), depth)
+ }
+ case reflect.Int16:
+ enc.binwrite(int16(v.Int()))
+ enc.pos += 2
+ case reflect.Uint16:
+ enc.binwrite(uint16(v.Uint()))
+ enc.pos += 2
+ case reflect.Int32:
+ enc.binwrite(int32(v.Int()))
+ enc.pos += 4
+ case reflect.Uint32:
+ enc.binwrite(uint32(v.Uint()))
+ enc.pos += 4
+ case reflect.Int64:
+ enc.binwrite(v.Int())
+ enc.pos += 8
+ case reflect.Uint64:
+ enc.binwrite(v.Uint())
+ enc.pos += 8
+ case reflect.Float64:
+ enc.binwrite(v.Float())
+ enc.pos += 8
+ case reflect.String:
+ enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
+ b := make([]byte, v.Len()+1)
+ copy(b, v.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case reflect.Ptr:
+ enc.encode(v.Elem(), depth)
+ case reflect.Slice, reflect.Array:
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus alignment for elements.
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem()))
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+
+ for i := 0; i < v.Len(); i++ {
+ bufenc.encode(v.Index(i), depth+1)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(alignment(v.Type().Elem()))
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ case reflect.Struct:
+ if depth >= 64 && v.Type() != signatureType {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ switch t := v.Type(); t {
+ case signatureType:
+ str := v.Field(0)
+ enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
+ b := make([]byte, str.Len()+1)
+ copy(b, str.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case variantType:
+ variant := v.Interface().(Variant)
+ enc.encode(reflect.ValueOf(variant.sig), depth+1)
+ enc.encode(reflect.ValueOf(variant.value), depth+1)
+ default:
+ for i := 0; i < v.Type().NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ enc.encode(v.Field(i), depth+1)
+ }
+ }
+ }
+ case reflect.Map:
+ // Maps are arrays of structures, so they actually increase the depth by
+ // 2.
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ keys := v.MapKeys()
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus 8-byte alignment
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, 8)
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+ for _, k := range keys {
+ bufenc.align(8)
+ bufenc.encode(k, depth+2)
+ bufenc.encode(v.MapIndex(k), depth+2)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(8)
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ default:
+ panic(InvalidTypeError{v.Type()})
+ }
+}
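
The array and map cases above encode the elements into a side buffer first, because the D-Bus length prefix counts only the element bytes, not the padding between the length field and the first element. A worked sketch of the offset computation, mirroring padding above with assumed positions:

```go
package main

import "fmt"

// padding mirrors (*encoder).padding: the number of bytes needed to reach
// alignment algn from absolute position pos+offset.
func padding(pos, offset, algn int) int {
	abs := pos + offset
	if abs%algn != 0 {
		return (abs+algn-1)&^(algn-1) - abs
	}
	return 0
}

func main() {
	// Assume an array of uint64 (element alignment 8) begins while the
	// encoder sits at position 5.
	pos := 5
	n := padding(pos, 0, 4) + 4            // pad to the uint32 length field, plus its 4 bytes
	offset := pos + n + padding(pos, n, 8) // where the first element will land
	fmt.Println(n, offset)                 // 7 16: length at 8..11, elements at 16
	// The side buffer is encoded as if starting at 16, so the padding its
	// elements emit is consistent with the final stream, while the length
	// prefix counts element bytes only.
}
```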
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/export.go b/Godeps/_workspace/src/github.com/godbus/dbus/export.go
new file mode 100644
index 0000000..c6440a7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/export.go
@@ -0,0 +1,411 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ errmsgInvalidArg = Error{
+ "org.freedesktop.DBus.Error.InvalidArgs",
+ []interface{}{"Invalid type / number of args"},
+ }
+ errmsgNoObject = Error{
+ "org.freedesktop.DBus.Error.NoSuchObject",
+ []interface{}{"No such object"},
+ }
+ errmsgUnknownMethod = Error{
+ "org.freedesktop.DBus.Error.UnknownMethod",
+ []interface{}{"Unknown / invalid method"},
+ }
+)
+
+// exportWithMapping represents an exported struct along with a method name
+// mapping to allow for exporting lower-case methods, etc.
+type exportWithMapping struct {
+ export interface{}
+
+ // Method name mapping; key -> struct method, value -> dbus method.
+ mapping map[string]string
+
+ // Whether or not this export is for the entire subtree
+ includeSubtree bool
+}
+
+// Sender is a type which can be used in exported methods to receive the message
+// sender.
+type Sender string
+
+func exportedMethod(export exportWithMapping, name string) reflect.Value {
+ if export.export == nil {
+ return reflect.Value{}
+ }
+
+ // If a mapping was included in the export, check the map to see if we
+ // should be looking for a different method in the export.
+ if export.mapping != nil {
+ for key, value := range export.mapping {
+ if value == name {
+ name = key
+ break
+ }
+
+ // Catch the case where a method is aliased but the client is calling
+ // the original, e.g. the "Foo" method was exported as "foo" and the
+ // D-Bus client called the original "Foo".
+ if key == name {
+ return reflect.Value{}
+ }
+ }
+ }
+
+ value := reflect.ValueOf(export.export)
+ m := value.MethodByName(name)
+
+ // Catch the case of attempting to call an unexported method
+ method, ok := value.Type().MethodByName(name)
+
+ if !m.IsValid() || !ok || method.PkgPath != "" {
+ return reflect.Value{}
+ }
+ t := m.Type()
+ if t.NumOut() == 0 ||
+ t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
+
+ return reflect.Value{}
+ }
+ return m
+}
+
+// searchHandlers looks through all registered handlers for one that handles
+// the given path. If no verbatim match is found, it also checks for a subtree
+// registration covering the path.
+func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) {
+ conn.handlersLck.RLock()
+ defer conn.handlersLck.RUnlock()
+
+ handlers, ok := conn.handlers[path]
+ if ok {
+ return handlers, ok
+ }
+
+ // If handlers weren't found for this exact path, look for a matching subtree
+ // registration
+ handlers = make(map[string]exportWithMapping)
+ path = path[:strings.LastIndex(string(path), "/")]
+ for len(path) > 0 {
+ var subtreeHandlers map[string]exportWithMapping
+ subtreeHandlers, ok = conn.handlers[path]
+ if ok {
+ for iface, handler := range subtreeHandlers {
+ // Only include this handler if it registered for the subtree
+ if handler.includeSubtree {
+ handlers[iface] = handler
+ }
+ }
+
+ break
+ }
+
+ path = path[:strings.LastIndex(string(path), "/")]
+ }
+
+ return handlers, ok
+}
+
+// handleCall handles the given method call (i.e. looks if it's one of the
+// pre-implemented ones and searches for a corresponding handler if not).
+func (conn *Conn) handleCall(msg *Message) {
+ name := msg.Headers[FieldMember].value.(string)
+ path := msg.Headers[FieldPath].value.(ObjectPath)
+ ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
+ sender, hasSender := msg.Headers[FieldSender].value.(string)
+ serial := msg.serial
+ if ifaceName == "org.freedesktop.DBus.Peer" {
+ switch name {
+ case "Ping":
+ conn.sendReply(sender, serial)
+ case "GetMachineId":
+ conn.sendReply(sender, serial, conn.uuid)
+ default:
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ }
+ return
+ }
+ if len(name) == 0 {
+ // An empty member name can never match a method; reply once and stop.
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ return
+ }
+
+ // Find the exported handler (if any) for this path
+ handlers, ok := conn.searchHandlers(path)
+ if !ok {
+ conn.sendError(errmsgNoObject, sender, serial)
+ return
+ }
+
+ var m reflect.Value
+ if hasIface {
+ iface := handlers[ifaceName]
+ m = exportedMethod(iface, name)
+ } else {
+ for _, v := range handlers {
+ m = exportedMethod(v, name)
+ if m.IsValid() {
+ break
+ }
+ }
+ }
+
+ if !m.IsValid() {
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ return
+ }
+
+ t := m.Type()
+ vs := msg.Body
+ pointers := make([]interface{}, t.NumIn())
+ decode := make([]interface{}, 0, len(vs))
+ for i := 0; i < t.NumIn(); i++ {
+ tp := t.In(i)
+ val := reflect.New(tp)
+ pointers[i] = val.Interface()
+ if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
+ val.Elem().SetString(sender)
+ } else if tp == reflect.TypeOf((*Message)(nil)).Elem() {
+ val.Elem().Set(reflect.ValueOf(*msg))
+ } else {
+ decode = append(decode, pointers[i])
+ }
+ }
+
+ if len(decode) != len(vs) {
+ conn.sendError(errmsgInvalidArg, sender, serial)
+ return
+ }
+
+ if err := Store(vs, decode...); err != nil {
+ conn.sendError(errmsgInvalidArg, sender, serial)
+ return
+ }
+
+ // Extract parameters
+ params := make([]reflect.Value, len(pointers))
+ for i := 0; i < len(pointers); i++ {
+ params[i] = reflect.ValueOf(pointers[i]).Elem()
+ }
+
+ // Call method
+ ret := m.Call(params)
+ if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
+ conn.sendError(*em, sender, serial)
+ return
+ }
+
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ reply := new(Message)
+ reply.Type = TypeMethodReply
+ reply.serial = conn.getSerial()
+ reply.Headers = make(map[HeaderField]Variant)
+ if hasSender {
+ reply.Headers[FieldDestination] = msg.Headers[FieldSender]
+ }
+ reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
+ reply.Body = make([]interface{}, len(ret)-1)
+ for i := 0; i < len(ret)-1; i++ {
+ reply.Body[i] = ret[i].Interface()
+ }
+ if len(ret) != 1 {
+ reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- reply
+ }
+ conn.outLck.RUnlock()
+ }
+}
+
+// Emit emits the given signal on the message bus. The name parameter must be
+// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
+func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
+ if !path.IsValid() {
+ return errors.New("dbus: invalid object path")
+ }
+ i := strings.LastIndex(name, ".")
+ if i == -1 {
+ return errors.New("dbus: invalid method name")
+ }
+ iface := name[:i]
+ member := name[i+1:]
+ if !isValidMember(member) {
+ return errors.New("dbus: invalid method name")
+ }
+ if !isValidInterface(iface) {
+ return errors.New("dbus: invalid interface name")
+ }
+ msg := new(Message)
+ msg.Type = TypeSignal
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ msg.Headers[FieldMember] = MakeVariant(member)
+ msg.Headers[FieldPath] = MakeVariant(path)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.outLck.RLock()
+ defer conn.outLck.RUnlock()
+ if conn.closed {
+ return ErrClosed
+ }
+ conn.out <- msg
+ return nil
+}
+
+// Export registers the given value to be exported as an object on the
+// message bus.
+//
+// If a method call on the given path and interface is received, an exported
+// method with the same name is called with v as the receiver if the
+// parameters match and the last return value is of type *Error. If this
+// *Error is not nil, it is sent back to the caller as an error.
+// Otherwise, a method reply is sent with the other return values as its body.
+//
+// Any parameters with the special type Sender are set to the sender of the
+// dbus message when the method is called. Parameters of this type do not
+// contribute to the dbus signature of the method (i.e. the method is exposed
+// as if the parameters of type Sender were not there).
+//
+// Similarly, any parameters with the type Message are set to the raw message
+// received on the bus. Again, parameters of this type do not contribute to the
+// dbus signature of the method.
+//
+// Every method call is executed in a new goroutine, so the method may be called
+// in multiple goroutines at once.
+//
+// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
+// handled for every object.
+//
+// Passing nil as the first parameter will cause conn to cease handling calls on
+// the given combination of path and interface.
+//
+// Export returns an error if path is not a valid path name.
+func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportWithMap(v, nil, path, iface)
+}
+
+// ExportWithMap works exactly like Export but provides the ability to remap
+// method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.exportWithMap(v, mapping, path, iface, false)
+}
+
+// ExportSubtree works exactly like Export but registers the given value for
+// the entire subtree under the provided root path rather than a single path.
+//
+// In order to make this useful, one parameter in each of the value's exported
+// methods should be a Message, in which case it will contain the raw message
+// (allowing one to get access to the path that caused the method to be called).
+//
+// Note that more specific export paths take precedence over less specific. For
+// example, a method call using the ObjectPath /foo/bar/baz will call a method
+// exported on /foo/bar before a method exported on /foo.
+func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportSubtreeWithMap(v, nil, path, iface)
+}
+
+// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
+// ability to remap method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.exportWithMap(v, mapping, path, iface, true)
+}
+
+// exportWithMap is the worker function for all exports/registrations.
+func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error {
+ if !path.IsValid() {
+ return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
+ }
+
+ conn.handlersLck.Lock()
+ defer conn.handlersLck.Unlock()
+
+ // Remove a previous export if the interface is nil
+ if v == nil {
+ if _, ok := conn.handlers[path]; ok {
+ delete(conn.handlers[path], iface)
+ if len(conn.handlers[path]) == 0 {
+ delete(conn.handlers, path)
+ }
+ }
+
+ return nil
+ }
+
+ // If this is the first handler for this path, make a new map to hold all
+ // handlers for this path.
+ if _, ok := conn.handlers[path]; !ok {
+ conn.handlers[path] = make(map[string]exportWithMapping)
+ }
+
+ // Finally, save this handler
+ conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree}
+
+ return nil
+}
+
+// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response.
+func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return ReleaseNameReply(r), nil
+}
+
+// RequestName calls org.freedesktop.DBus.RequestName and awaits a response.
+func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return RequestNameReply(r), nil
+}
+
+// ReleaseNameReply is the reply to a ReleaseName call.
+type ReleaseNameReply uint32
+
+const (
+ ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
+ ReleaseNameReplyNonExistent
+ ReleaseNameReplyNotOwner
+)
+
+// RequestNameFlags represents the possible flags for a RequestName call.
+type RequestNameFlags uint32
+
+const (
+ NameFlagAllowReplacement RequestNameFlags = 1 << iota
+ NameFlagReplaceExisting
+ NameFlagDoNotQueue
+)
+
+// RequestNameReply is the reply to a RequestName call.
+type RequestNameReply uint32
+
+const (
+ RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
+ RequestNameReplyInQueue
+ RequestNameReplyExists
+ RequestNameReplyAlreadyOwner
+)
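
A minimal usage sketch for the export machinery (the path and interface names are invented, and a session bus is assumed to be available). Note how the Sender parameter is filled in by the library and does not appear in the method's D-Bus signature:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

type greeter struct{}

// Hello matches the export contract: the last return value is *dbus.Error.
func (g greeter) Hello(sender dbus.Sender, name string) (string, *dbus.Error) {
	return fmt.Sprintf("hello %s (from %s)", name, sender), nil
}

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	if err := conn.Export(greeter{}, "/org/example/Greeter", "org.example.Greeter"); err != nil {
		panic(err)
	}
	select {} // serve forever
}
```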
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/homedir.go b/Godeps/_workspace/src/github.com/godbus/dbus/homedir.go
new file mode 100644
index 0000000..0b745f9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/homedir.go
@@ -0,0 +1,28 @@
+package dbus
+
+import (
+ "os"
+ "sync"
+)
+
+var (
+ homeDir string
+ homeDirLock sync.Mutex
+)
+
+func getHomeDir() string {
+ homeDirLock.Lock()
+ defer homeDirLock.Unlock()
+
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = os.Getenv("HOME")
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = lookupHomeDir()
+ return homeDir
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/homedir_dynamic.go b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_dynamic.go
new file mode 100644
index 0000000..2732081
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_dynamic.go
@@ -0,0 +1,15 @@
+// +build !static_build
+
+package dbus
+
+import (
+ "os/user"
+)
+
+func lookupHomeDir() string {
+ u, err := user.Current()
+ if err != nil {
+ return "/"
+ }
+ return u.HomeDir
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/homedir_static.go b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_static.go
new file mode 100644
index 0000000..b9d9cb5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/homedir_static.go
@@ -0,0 +1,45 @@
+// +build static_build
+
+package dbus
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func lookupHomeDir() string {
+ myUid := os.Getuid()
+
+ f, err := os.Open("/etc/passwd")
+ if err != nil {
+ return "/"
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ break
+ }
+
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ parts := strings.Split(line, ":")
+
+ if len(parts) >= 6 {
+ uid, err := strconv.Atoi(parts[2])
+ if err == nil && uid == myUid {
+ return parts[5]
+ }
+ }
+ }
+
+ // Default to / if we can't get a better value
+ return "/"
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go
new file mode 100644
index 0000000..790a23e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go
@@ -0,0 +1,27 @@
+package introspect
+
+import (
+ "encoding/xml"
+ "github.com/godbus/dbus"
+ "strings"
+)
+
+// Call calls org.freedesktop.Introspectable.Introspect on a remote object
+// and returns the introspection data.
+func Call(o dbus.BusObject) (*Node, error) {
+ var xmldata string
+ var node Node
+
+ err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata)
+ if err != nil {
+ return nil, err
+ }
+ err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node)
+ if err != nil {
+ return nil, err
+ }
+ if node.Name == "" {
+ node.Name = string(o.Path())
+ }
+ return &node, nil
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go
new file mode 100644
index 0000000..b06c3f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go
@@ -0,0 +1,86 @@
+// Package introspect provides some utilities for dealing with the DBus
+// introspection format.
+package introspect
+
+import "encoding/xml"
+
+// The introspection data for the org.freedesktop.DBus.Introspectable interface.
+var IntrospectData = Interface{
+ Name: "org.freedesktop.DBus.Introspectable",
+ Methods: []Method{
+ {
+ Name: "Introspect",
+ Args: []Arg{
+ {"out", "s", "out"},
+ },
+ },
+ },
+}
+
+// XML document type declaration of the introspection format version 1.0
+const IntrospectDeclarationString = `
+<!DOCTYPE node PUBLIC "-//freedesktop//DTD D-BUS Object Introspection 1.0//EN"
+ "http://www.freedesktop.org/standards/dbus/1.0/introspect.dtd">
+`
+
+// The introspection data for the org.freedesktop.DBus.Introspectable interface,
+// as a string.
+const IntrospectDataString = `
+<interface name="org.freedesktop.DBus.Introspectable">
+ <method name="Introspect">
+ <arg name="out" direction="out" type="s"/>
+ </method>
+</interface>
+`
+
+// Node is the root element of an introspection.
+type Node struct {
+ XMLName xml.Name `xml:"node"`
+ Name string `xml:"name,attr,omitempty"`
+ Interfaces []Interface `xml:"interface"`
+ Children []Node `xml:"node,omitempty"`
+}
+
+// Interface describes a DBus interface that is available on the message bus.
+type Interface struct {
+ Name string `xml:"name,attr"`
+ Methods []Method `xml:"method"`
+ Signals []Signal `xml:"signal"`
+ Properties []Property `xml:"property"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Method describes a Method on an Interface as returned by an introspection.
+type Method struct {
+ Name string `xml:"name,attr"`
+ Args []Arg `xml:"arg"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Signal describes a Signal emitted on an Interface.
+type Signal struct {
+ Name string `xml:"name,attr"`
+ Args []Arg `xml:"arg"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Property describes a property of an Interface.
+type Property struct {
+ Name string `xml:"name,attr"`
+ Type string `xml:"type,attr"`
+ Access string `xml:"access,attr"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Arg represents an argument of a method or a signal.
+type Arg struct {
+ Name string `xml:"name,attr,omitempty"`
+ Type string `xml:"type,attr"`
+ Direction string `xml:"direction,attr,omitempty"`
+}
+
+// Annotation is an annotation in the introspection format.
+type Annotation struct {
+ Name string `xml:"name,attr"`
+ Value string `xml:"value,attr"`
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go
new file mode 100644
index 0000000..2f16690
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go
@@ -0,0 +1,76 @@
+package introspect
+
+import (
+ "encoding/xml"
+ "github.com/godbus/dbus"
+ "reflect"
+ "strings"
+)
+
+// Introspectable implements org.freedesktop.DBus.Introspectable.
+//
+// You can create it by converting the XML-formatted introspection data from a
+// string to an Introspectable, or by calling NewIntrospectable with a Node.
+// Then, export it as org.freedesktop.DBus.Introspectable on your object.
+type Introspectable string
+
+// NewIntrospectable returns an Introspectable that returns the introspection
+// data that corresponds to the given Node. If n.Interfaces doesn't contain the
+// data for org.freedesktop.DBus.Introspectable, it is added automatically.
+func NewIntrospectable(n *Node) Introspectable {
+ found := false
+ for _, v := range n.Interfaces {
+ if v.Name == "org.freedesktop.DBus.Introspectable" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ n.Interfaces = append(n.Interfaces, IntrospectData)
+ }
+ b, err := xml.Marshal(n)
+ if err != nil {
+ panic(err)
+ }
+ return Introspectable(strings.TrimSpace(IntrospectDeclarationString) + string(b))
+}
+
+// Introspect implements org.freedesktop.DBus.Introspectable.Introspect.
+func (i Introspectable) Introspect() (string, *dbus.Error) {
+ return string(i), nil
+}
+
+// Methods returns the description of the methods of v. This can be used to
+// create a Node which can be passed to NewIntrospectable.
+func Methods(v interface{}) []Method {
+ t := reflect.TypeOf(v)
+ ms := make([]Method, 0, t.NumMethod())
+ for i := 0; i < t.NumMethod(); i++ {
+ if t.Method(i).PkgPath != "" {
+ continue
+ }
+ mt := t.Method(i).Type
+ if mt.NumOut() == 0 ||
+ mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{}) {
+
+ continue
+ }
+ var m Method
+ m.Name = t.Method(i).Name
+ m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2)
+ for j := 1; j < mt.NumIn(); j++ {
+ if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() &&
+ mt.In(j) != reflect.TypeOf((*dbus.Message)(nil)).Elem() {
+ arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"}
+ m.Args = append(m.Args, arg)
+ }
+ }
+ for j := 0; j < mt.NumOut()-1; j++ {
+ arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"}
+ m.Args = append(m.Args, arg)
+ }
+ m.Annotations = make([]Annotation, 0)
+ ms = append(ms, m)
+ }
+ return ms
+}
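
Tying the introspection pieces together, a sketch with invented names: Methods derives the method descriptions from a value, NewIntrospectable wraps them in a Node, and the result is exported alongside the object itself:

```go
package main

import (
	"github.com/godbus/dbus"
	"github.com/godbus/dbus/introspect"
)

type greeter struct{}

func (g greeter) Hello(name string) (string, *dbus.Error) {
	return "hello " + name, nil
}

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	node := &introspect.Node{
		Name: "/org/example/Greeter",
		Interfaces: []introspect.Interface{{
			Name:    "org.example.Greeter",
			Methods: introspect.Methods(greeter{}),
		}},
	}
	// Export the object and its introspection data on the same path.
	conn.Export(greeter{}, "/org/example/Greeter", "org.example.Greeter")
	conn.Export(introspect.NewIntrospectable(node), "/org/example/Greeter",
		"org.freedesktop.DBus.Introspectable")
	select {}
}
```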
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/message.go b/Godeps/_workspace/src/github.com/godbus/dbus/message.go
new file mode 100644
index 0000000..075d6e3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/message.go
@@ -0,0 +1,346 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+const protoVersion byte = 1
+
+// Flags represents the possible flags of a D-Bus message.
+type Flags byte
+
+const (
+ // FlagNoReplyExpected signals that the message is not expected to generate
+ // a reply. If this flag is set on outgoing messages, any possible reply
+ // will be discarded.
+ FlagNoReplyExpected Flags = 1 << iota
+ // FlagNoAutoStart signals that the message bus should not automatically
+ // start an application when handling this message.
+ FlagNoAutoStart
+)
+
+// Type represents the possible types of a D-Bus message.
+type Type byte
+
+const (
+ TypeMethodCall Type = 1 + iota
+ TypeMethodReply
+ TypeError
+ TypeSignal
+ typeMax
+)
+
+func (t Type) String() string {
+ switch t {
+ case TypeMethodCall:
+ return "method call"
+ case TypeMethodReply:
+ return "reply"
+ case TypeError:
+ return "error"
+ case TypeSignal:
+ return "signal"
+ }
+ return "invalid"
+}
+
+// HeaderField represents the possible byte codes for the headers
+// of a D-Bus message.
+type HeaderField byte
+
+const (
+ FieldPath HeaderField = 1 + iota
+ FieldInterface
+ FieldMember
+ FieldErrorName
+ FieldReplySerial
+ FieldDestination
+ FieldSender
+ FieldSignature
+ FieldUnixFDs
+ fieldMax
+)
+
+// An InvalidMessageError describes the reason why a D-Bus message is regarded as
+// invalid.
+type InvalidMessageError string
+
+func (e InvalidMessageError) Error() string {
+ return "dbus: invalid message: " + string(e)
+}
+
+// fieldTypes maps the header fields to their expected value types.
+var fieldTypes = [fieldMax]reflect.Type{
+ FieldPath: objectPathType,
+ FieldInterface: stringType,
+ FieldMember: stringType,
+ FieldErrorName: stringType,
+ FieldReplySerial: uint32Type,
+ FieldDestination: stringType,
+ FieldSender: stringType,
+ FieldSignature: signatureType,
+ FieldUnixFDs: uint32Type,
+}
+
+// requiredFields lists the header fields that are required by the different
+// message types.
+var requiredFields = [typeMax][]HeaderField{
+ TypeMethodCall: {FieldPath, FieldMember},
+ TypeMethodReply: {FieldReplySerial},
+ TypeError: {FieldErrorName, FieldReplySerial},
+ TypeSignal: {FieldPath, FieldInterface, FieldMember},
+}
+
+// Message represents a single D-Bus message.
+type Message struct {
+ Type
+ Flags
+ Headers map[HeaderField]Variant
+ Body []interface{}
+
+ serial uint32
+}
+
+type header struct {
+ Field byte
+ Variant
+}
+
+// DecodeMessage tries to decode a single message in the D-Bus wire format
+// from the given reader. The byte order is figured out from the first byte.
+// Any returned error may come from the underlying reader, or be an
+// InvalidMessageError or a FormatError.
+func DecodeMessage(rd io.Reader) (msg *Message, err error) {
+ var order binary.ByteOrder
+ var hlength, length uint32
+ var typ, flags, proto byte
+ var headers []header
+
+ b := make([]byte, 1)
+ _, err = rd.Read(b)
+ if err != nil {
+ return
+ }
+ switch b[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+
+ dec := newDecoder(rd, order)
+ dec.pos = 1
+
+ msg = new(Message)
+ vs, err := dec.Decode(Signature{"yyyuu"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
+ return nil, err
+ }
+ msg.Type = Type(typ)
+ msg.Flags = Flags(flags)
+
+ // get the header length separately because we need it later
+ b = make([]byte, 4)
+ _, err = io.ReadFull(rd, b)
+ if err != nil {
+ return nil, err
+ }
+ binary.Read(bytes.NewBuffer(b), order, &hlength)
+ if hlength+length+16 > 1<<27 {
+ return nil, InvalidMessageError("message is too long")
+ }
+ dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
+ dec.pos = 12
+ vs, err = dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &headers); err != nil {
+ return nil, err
+ }
+
+ msg.Headers = make(map[HeaderField]Variant)
+ for _, v := range headers {
+ msg.Headers[HeaderField(v.Field)] = v.Variant
+ }
+
+ dec.align(8)
+ body := make([]byte, int(length))
+ if length != 0 {
+ _, err := io.ReadFull(rd, body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err = msg.IsValid(); err != nil {
+ return nil, err
+ }
+ sig, _ := msg.Headers[FieldSignature].value.(Signature)
+ if sig.str != "" {
+ buf := bytes.NewBuffer(body)
+ dec = newDecoder(buf, order)
+ vs, err := dec.Decode(sig)
+ if err != nil {
+ return nil, err
+ }
+ msg.Body = vs
+ }
+
+ return
+}
+
+// EncodeTo encodes and sends a message to the given writer. The byte order must
+// be either binary.LittleEndian or binary.BigEndian. If the message is not
+// valid or an error occurs when writing, an error is returned.
+func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
+ if err := msg.IsValid(); err != nil {
+ return err
+ }
+ var vs [7]interface{}
+ switch order {
+ case binary.LittleEndian:
+ vs[0] = byte('l')
+ case binary.BigEndian:
+ vs[0] = byte('B')
+ default:
+ return errors.New("dbus: invalid byte order")
+ }
+ body := new(bytes.Buffer)
+ enc := newEncoder(body, order)
+ if len(msg.Body) != 0 {
+ enc.Encode(msg.Body...)
+ }
+ vs[1] = msg.Type
+ vs[2] = msg.Flags
+ vs[3] = protoVersion
+ vs[4] = uint32(len(body.Bytes()))
+ vs[5] = msg.serial
+ headers := make([]header, 0, len(msg.Headers))
+ for k, v := range msg.Headers {
+ headers = append(headers, header{byte(k), v})
+ }
+ vs[6] = headers
+ var buf bytes.Buffer
+ enc = newEncoder(&buf, order)
+ enc.Encode(vs[:]...)
+ enc.align(8)
+ body.WriteTo(&buf)
+ if buf.Len() > 1<<27 {
+ return InvalidMessageError("message is too long")
+ }
+ if _, err := buf.WriteTo(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// IsValid checks whether msg is a valid message and returns an
+// InvalidMessageError if it is not.
+func (msg *Message) IsValid() error {
+ if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
+ return InvalidMessageError("invalid flags")
+ }
+ if msg.Type == 0 || msg.Type >= typeMax {
+ return InvalidMessageError("invalid message type")
+ }
+ for k, v := range msg.Headers {
+ if k == 0 || k >= fieldMax {
+ return InvalidMessageError("invalid header")
+ }
+ if reflect.TypeOf(v.value) != fieldTypes[k] {
+ return InvalidMessageError("invalid type of header field")
+ }
+ }
+ for _, v := range requiredFields[msg.Type] {
+ if _, ok := msg.Headers[v]; !ok {
+ return InvalidMessageError("missing required header")
+ }
+ }
+ if path, ok := msg.Headers[FieldPath]; ok {
+ if !path.value.(ObjectPath).IsValid() {
+ return InvalidMessageError("invalid path name")
+ }
+ }
+ if iface, ok := msg.Headers[FieldInterface]; ok {
+ if !isValidInterface(iface.value.(string)) {
+ return InvalidMessageError("invalid interface name")
+ }
+ }
+ if member, ok := msg.Headers[FieldMember]; ok {
+ if !isValidMember(member.value.(string)) {
+ return InvalidMessageError("invalid member name")
+ }
+ }
+ if errname, ok := msg.Headers[FieldErrorName]; ok {
+ if !isValidInterface(errname.value.(string)) {
+ return InvalidMessageError("invalid error name")
+ }
+ }
+ if len(msg.Body) != 0 {
+ if _, ok := msg.Headers[FieldSignature]; !ok {
+ return InvalidMessageError("missing signature")
+ }
+ }
+ return nil
+}
+
+// Serial returns the message's serial number. The returned value is only valid
+// for messages received by eavesdropping.
+func (msg *Message) Serial() uint32 {
+ return msg.serial
+}
+
+// String returns a string representation of a message similar to the format of
+// dbus-monitor.
+func (msg *Message) String() string {
+ if err := msg.IsValid(); err != nil {
+ return ""
+ }
+ s := msg.Type.String()
+ if v, ok := msg.Headers[FieldSender]; ok {
+ s += " from " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldDestination]; ok {
+ s += " to " + v.value.(string)
+ }
+ s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
+ if v, ok := msg.Headers[FieldReplySerial]; ok {
+ s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldUnixFDs]; ok {
+ s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldPath]; ok {
+ s += " path " + string(v.value.(ObjectPath))
+ }
+ if v, ok := msg.Headers[FieldInterface]; ok {
+ s += " interface " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldErrorName]; ok {
+ s += " error " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldMember]; ok {
+ s += " member " + v.value.(string)
+ }
+ if len(msg.Body) != 0 {
+ s += "\n"
+ }
+ for i, v := range msg.Body {
+ s += " " + MakeVariant(v).String()
+ if i != len(msg.Body)-1 {
+ s += "\n"
+ }
+ }
+ return s
+}
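
Since EncodeTo and DecodeMessage are both exported, the wire format can be exercised round-trip without a live bus. A sketch with an invented signal; the serial stays at its zero value because it is unexported:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	msg := &dbus.Message{
		Type: dbus.TypeSignal, // requires path, interface and member headers
		Headers: map[dbus.HeaderField]dbus.Variant{
			dbus.FieldPath:      dbus.MakeVariant(dbus.ObjectPath("/org/example/Obj")),
			dbus.FieldInterface: dbus.MakeVariant("org.example.Iface"),
			dbus.FieldMember:    dbus.MakeVariant("Changed"),
		},
	}
	var buf bytes.Buffer
	if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
		panic(err) // would report an InvalidMessageError
	}
	decoded, err := dbus.DecodeMessage(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // dbus-monitor style: "signal serial 0 path ..."
}
```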
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/object.go b/Godeps/_workspace/src/github.com/godbus/dbus/object.go
new file mode 100644
index 0000000..7ef45da
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/object.go
@@ -0,0 +1,126 @@
+package dbus
+
+import (
+ "errors"
+ "strings"
+)
+
+// BusObject is the interface of a remote object on which methods can be
+// invoked.
+type BusObject interface {
+ Call(method string, flags Flags, args ...interface{}) *Call
+ Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
+ GetProperty(p string) (Variant, error)
+ Destination() string
+ Path() ObjectPath
+}
+
+// Object represents a remote object on which methods can be invoked.
+type Object struct {
+ conn *Conn
+ dest string
+ path ObjectPath
+}
+
+// Call calls a method with (*Object).Go and waits for its reply.
+func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
+ return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
+}
+
+// Go calls a method with the given arguments asynchronously. It returns a
+// Call structure representing this method call. The passed channel will
+// return the same value once the call is done. If ch is nil, a new channel
+// will be allocated. Otherwise, ch has to be buffered or Go will panic.
+//
+// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
+// is returned of which only the Err member is valid.
+//
+// If the method parameter contains a dot ('.'), the part before the last dot
+// specifies the interface on which the method is called.
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ iface := ""
+ i := strings.LastIndex(method, ".")
+ if i != -1 {
+ iface = method[:i]
+ }
+ method = method[i+1:]
+ msg := new(Message)
+ msg.Type = TypeMethodCall
+ msg.serial = o.conn.getSerial()
+ msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldPath] = MakeVariant(o.path)
+ msg.Headers[FieldDestination] = MakeVariant(o.dest)
+ msg.Headers[FieldMember] = MakeVariant(method)
+ if iface != "" {
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ }
+ msg.Body = args
+ if len(args) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
+ }
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 10)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Object).Go")
+ }
+ call := &Call{
+ Destination: o.dest,
+ Path: o.path,
+ Method: method,
+ Args: args,
+ Done: ch,
+ }
+ o.conn.callsLck.Lock()
+ o.conn.calls[msg.serial] = call
+ o.conn.callsLck.Unlock()
+ o.conn.outLck.RLock()
+ if o.conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ o.conn.out <- msg
+ }
+ o.conn.outLck.RUnlock()
+ return call
+ }
+ o.conn.outLck.RLock()
+ defer o.conn.outLck.RUnlock()
+ if o.conn.closed {
+ return &Call{Err: ErrClosed}
+ }
+ o.conn.out <- msg
+ return &Call{Err: nil}
+}
+
+// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
+// object. The property name must be given in interface.member notation.
+func (o *Object) GetProperty(p string) (Variant, error) {
+ idx := strings.LastIndex(p, ".")
+ if idx == -1 || idx+1 == len(p) {
+ return Variant{}, errors.New("dbus: invalid property " + p)
+ }
+
+ iface := p[:idx]
+ prop := p[idx+1:]
+
+ result := Variant{}
+ err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
+
+ if err != nil {
+ return Variant{}, err
+ }
+
+ return result, nil
+}
+
+// Destination returns the destination that calls on o are sent to.
+func (o *Object) Destination() string {
+ return o.dest
+}
+
+// Path returns the path that calls on o are sent to.
+func (o *Object) Path() ObjectPath {
+ return o.path
+}
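
Typical call-side usage, sketched against the session bus (conn.Object and (*Call).Store are defined elsewhere in this package; ListNames and GetId are standard bus methods):

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

	// Call blocks until the reply arrives; Store unpacks the reply body.
	var names []string
	if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		panic(err)
	}
	fmt.Println(len(names), "names on the bus")

	// Go returns immediately; the buffered channel receives the finished call.
	ch := make(chan *dbus.Call, 1)
	obj.Go("org.freedesktop.DBus.GetId", 0, ch)
	call := <-ch
	if call.Err == nil {
		var id string
		call.Store(&id)
		fmt.Println("bus id:", id)
	}
}
```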
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go b/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go
new file mode 100644
index 0000000..834a1fa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go
@@ -0,0 +1,264 @@
+// Package prop provides the Properties struct which can be used to implement
+// org.freedesktop.DBus.Properties.
+package prop
+
+import (
+ "github.com/godbus/dbus"
+ "github.com/godbus/dbus/introspect"
+ "sync"
+)
+
+// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is
+// emitted for a property. If it is EmitFalse, no signal is emitted. If it is
+// EmitTrue, the signal is emitted with the new value. If it is EmitInvalidates,
+// the signal is also emitted, but the new value of the property is not
+// disclosed.
+type EmitType byte
+
+const (
+ EmitFalse EmitType = iota
+ EmitTrue
+ EmitInvalidates
+)
+
+// ErrIfaceNotFound is the error returned to peers who try to access properties
+// on interfaces that aren't found.
+var ErrIfaceNotFound = dbus.NewError("org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil)
+
+// ErrPropNotFound is the error returned to peers trying to access properties
+// that aren't found.
+var ErrPropNotFound = dbus.NewError("org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil)
+
+// ErrReadOnly is the error returned to peers trying to set a read-only
+// property.
+var ErrReadOnly = dbus.NewError("org.freedesktop.DBus.Properties.Error.ReadOnly", nil)
+
+// ErrInvalidArg is returned to peers if the type of the property that is being
+// changed and the argument don't match.
+var ErrInvalidArg = dbus.NewError("org.freedesktop.DBus.Properties.Error.InvalidArg", nil)
+
+// The introspection data for the org.freedesktop.DBus.Properties interface.
+var IntrospectData = introspect.Interface{
+ Name: "org.freedesktop.DBus.Properties",
+ Methods: []introspect.Method{
+ {
+ Name: "Get",
+ Args: []introspect.Arg{
+ {"interface", "s", "in"},
+ {"property", "s", "in"},
+ {"value", "v", "out"},
+ },
+ },
+ {
+ Name: "GetAll",
+ Args: []introspect.Arg{
+ {"interface", "s", "in"},
+ {"props", "a{sv}", "out"},
+ },
+ },
+ {
+ Name: "Set",
+ Args: []introspect.Arg{
+ {"interface", "s", "in"},
+ {"property", "s", "in"},
+ {"value", "v", "in"},
+ },
+ },
+ },
+ Signals: []introspect.Signal{
+ {
+ Name: "PropertiesChanged",
+ Args: []introspect.Arg{
+ {"interface", "s", "out"},
+ {"changed_properties", "a{sv}", "out"},
+ {"invalidates_properties", "as", "out"},
+ },
+ },
+ },
+}
+
+// The introspection data for the org.freedesktop.DBus.Properties interface, as
+// a string.
+const IntrospectDataString = `
+<interface name="org.freedesktop.DBus.Properties">
+ <method name="Get">
+ <arg name="interface" direction="in" type="s"/>
+ <arg name="property" direction="in" type="s"/>
+ <arg name="value" direction="out" type="v"/>
+ </method>
+ <method name="GetAll">
+ <arg name="interface" direction="in" type="s"/>
+ <arg name="props" direction="out" type="a{sv}"/>
+ </method>
+ <method name="Set">
+ <arg name="interface" direction="in" type="s"/>
+ <arg name="property" direction="in" type="s"/>
+ <arg name="value" direction="in" type="v"/>
+ </method>
+ <signal name="PropertiesChanged">
+ <arg name="interface" direction="out" type="s"/>
+ <arg name="changed_properties" direction="out" type="a{sv}"/>
+ <arg name="invalidates_properties" direction="out" type="as"/>
+ </signal>
+</interface>
+`
+
+// Prop represents a single property. It is used for creating a Properties
+// value.
+type Prop struct {
+ // Initial value. Must be a DBus-representable type.
+ Value interface{}
+
+ // If true, the value can be modified by calls to Set.
+ Writable bool
+
+ // Controls how org.freedesktop.DBus.Properties.PropertiesChanged is
+ // emitted if this property changes.
+ Emit EmitType
+
+ // If not nil, anytime this property is changed by Set, this function is
+ // called with an appropriate Change as its argument. If the returned error
+ // is not nil, it is sent back to the caller of Set and the property is not
+ // changed.
+ Callback func(*Change) *dbus.Error
+}
+
+// Change represents a change of a property by a call to Set.
+type Change struct {
+ Props *Properties
+ Iface string
+ Name string
+ Value interface{}
+}
+
+// Properties is a set of values that can be made available to the message bus
+// using the org.freedesktop.DBus.Properties interface. It is safe for
+// concurrent use by multiple goroutines.
+type Properties struct {
+ m map[string]map[string]*Prop
+ mut sync.RWMutex
+ conn *dbus.Conn
+ path dbus.ObjectPath
+}
+
+// New returns a new Properties structure that manages the given properties.
+// The key for the first-level map of props is the name of the interface; the
+// second-level key is the name of the property. The returned structure will be
+// exported as org.freedesktop.DBus.Properties on path.
+func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties {
+ p := &Properties{m: props, conn: conn, path: path}
+ conn.Export(p, path, "org.freedesktop.DBus.Properties")
+ return p
+}
+
+// Get implements org.freedesktop.DBus.Properties.Get.
+func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ m, ok := p.m[iface]
+ if !ok {
+ return dbus.Variant{}, ErrIfaceNotFound
+ }
+ prop, ok := m[property]
+ if !ok {
+ return dbus.Variant{}, ErrPropNotFound
+ }
+ return dbus.MakeVariant(prop.Value), nil
+}
+
+// GetAll implements org.freedesktop.DBus.Properties.GetAll.
+func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ m, ok := p.m[iface]
+ if !ok {
+ return nil, ErrIfaceNotFound
+ }
+ rm := make(map[string]dbus.Variant, len(m))
+ for k, v := range m {
+ rm[k] = dbus.MakeVariant(v.Value)
+ }
+ return rm, nil
+}
+
+// GetMust returns the value of the given property and panics if either the
+// interface or the property name are invalid.
+func (p *Properties) GetMust(iface, property string) interface{} {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ return p.m[iface][property].Value
+}
+
+// Introspection returns the introspection data that represents the properties
+// of iface.
+func (p *Properties) Introspection(iface string) []introspect.Property {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ m := p.m[iface]
+ s := make([]introspect.Property, 0, len(m))
+ for k, v := range m {
+ p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()}
+ if v.Writable {
+ p.Access = "readwrite"
+ } else {
+ p.Access = "read"
+ }
+ s = append(s, p)
+ }
+ return s
+}
+
+// set sets the given property and emits PropertiesChanged if appropriate.
+// p.mut must already be locked.
+func (p *Properties) set(iface, property string, v interface{}) {
+ prop := p.m[iface][property]
+ prop.Value = v
+ switch prop.Emit {
+ case EmitFalse:
+ // do nothing
+ case EmitInvalidates:
+ p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
+ iface, map[string]dbus.Variant{}, []string{property})
+ case EmitTrue:
+ p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
+ iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)},
+ []string{})
+ default:
+ panic("invalid value for EmitType")
+ }
+}
+
+// Set implements org.freedesktop.DBus.Properties.Set.
+func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error {
+ p.mut.Lock()
+ defer p.mut.Unlock()
+ m, ok := p.m[iface]
+ if !ok {
+ return ErrIfaceNotFound
+ }
+ prop, ok := m[property]
+ if !ok {
+ return ErrPropNotFound
+ }
+ if !prop.Writable {
+ return ErrReadOnly
+ }
+ if newv.Signature() != dbus.SignatureOf(prop.Value) {
+ return ErrInvalidArg
+ }
+ if prop.Callback != nil {
+ err := prop.Callback(&Change{p, iface, property, newv.Value()})
+ if err != nil {
+ return err
+ }
+ }
+ p.set(iface, property, newv.Value())
+ return nil
+}
+
+// SetMust sets the value of the given property and panics if the interface or
+// the property name are invalid.
+func (p *Properties) SetMust(iface, property string, v interface{}) {
+ p.mut.Lock()
+ p.set(iface, property, v)
+ p.mut.Unlock()
+}
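
A sketch of wiring Properties onto a path (the interface name and values are invented; prop.New exports the returned structure itself, so no separate Export call is needed for it):

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
	"github.com/godbus/dbus/prop"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	propsSpec := map[string]map[string]*prop.Prop{
		"org.example.Greeter": {
			"Greeting": {
				Value:    "hello",
				Writable: true,
				Emit:     prop.EmitTrue,
				Callback: func(c *prop.Change) *dbus.Error {
					fmt.Printf("%s.%s set to %v\n", c.Iface, c.Name, c.Value)
					return nil // accept the change
				},
			},
		},
	}
	p := prop.New(conn, "/org/example/Greeter", propsSpec)
	fmt.Println(p.GetMust("org.example.Greeter", "Greeting"))
	select {}
}
```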
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/sig.go b/Godeps/_workspace/src/github.com/godbus/dbus/sig.go
new file mode 100644
index 0000000..f45b53c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/sig.go
@@ -0,0 +1,257 @@
+package dbus
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var sigToType = map[byte]reflect.Type{
+ 'y': byteType,
+ 'b': boolType,
+ 'n': int16Type,
+ 'q': uint16Type,
+ 'i': int32Type,
+ 'u': uint32Type,
+ 'x': int64Type,
+ 't': uint64Type,
+ 'd': float64Type,
+ 's': stringType,
+ 'g': signatureType,
+ 'o': objectPathType,
+ 'v': variantType,
+ 'h': unixFDIndexType,
+}
+
+// Signature represents a correct type signature as specified by the D-Bus
+// specification. The zero value represents the empty signature, "".
+type Signature struct {
+ str string
+}
+
+// SignatureOf returns the concatenation of all the signatures of the given
+// values. It panics if one of them is not representable in D-Bus.
+func SignatureOf(vs ...interface{}) Signature {
+ var s string
+ for _, v := range vs {
+ s += getSignature(reflect.TypeOf(v))
+ }
+ return Signature{s}
+}
+
+// SignatureOfType returns the signature of the given type. It panics if the
+// type is not representable in D-Bus.
+func SignatureOfType(t reflect.Type) Signature {
+ return Signature{getSignature(t)}
+}
+
+// getSignature returns the signature of the given type and panics on unknown types.
+func getSignature(t reflect.Type) string {
+ // handle simple types first
+ switch t.Kind() {
+ case reflect.Uint8:
+ return "y"
+ case reflect.Bool:
+ return "b"
+ case reflect.Int16:
+ return "n"
+ case reflect.Uint16:
+ return "q"
+ case reflect.Int32:
+ if t == unixFDType {
+ return "h"
+ }
+ return "i"
+ case reflect.Uint32:
+ if t == unixFDIndexType {
+ return "h"
+ }
+ return "u"
+ case reflect.Int64:
+ return "x"
+ case reflect.Uint64:
+ return "t"
+ case reflect.Float64:
+ return "d"
+ case reflect.Ptr:
+ return getSignature(t.Elem())
+ case reflect.String:
+ if t == objectPathType {
+ return "o"
+ }
+ return "s"
+ case reflect.Struct:
+ if t == variantType {
+ return "v"
+ } else if t == signatureType {
+ return "g"
+ }
+ var s string
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ s += getSignature(t.Field(i).Type)
+ }
+ }
+ return "(" + s + ")"
+ case reflect.Array, reflect.Slice:
+ return "a" + getSignature(t.Elem())
+ case reflect.Map:
+ if !isKeyType(t.Key()) {
+ panic(InvalidTypeError{t})
+ }
+ return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
+ }
+ panic(InvalidTypeError{t})
+}
+
+// ParseSignature returns the signature represented by this string, or a
+// SignatureError if the string is not a valid signature.
+func ParseSignature(s string) (sig Signature, err error) {
+ if len(s) == 0 {
+ return
+ }
+ if len(s) > 255 {
+ return Signature{""}, SignatureError{s, "too long"}
+ }
+ sig.str = s
+ for err == nil && len(s) != 0 {
+ err, s = validSingle(s, 0)
+ }
+ if err != nil {
+ sig = Signature{""}
+ }
+
+ return
+}
+
+// ParseSignatureMust behaves like ParseSignature, except that it panics if s
+// is not valid.
+func ParseSignatureMust(s string) Signature {
+ sig, err := ParseSignature(s)
+ if err != nil {
+ panic(err)
+ }
+ return sig
+}
+
+// Empty returns whether the signature is the empty signature.
+func (s Signature) Empty() bool {
+ return s.str == ""
+}
+
+// Single returns whether the signature represents a single, complete type.
+func (s Signature) Single() bool {
+ err, r := validSingle(s.str, 0)
+ return err == nil && r == ""
+}
+
+// String returns the signature's string representation.
+func (s Signature) String() string {
+ return s.str
+}
+
+// A SignatureError indicates that a signature passed to a function or received
+// on a connection is not a valid signature.
+type SignatureError struct {
+ Sig string
+ Reason string
+}
+
+func (e SignatureError) Error() string {
+ return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
+}
+
+// Try to read a single type from this string. If it was successful, err is nil
+// and rem is the remaining unparsed part. Otherwise, err is a non-nil
+// SignatureError and rem is "". depth is the current recursion depth which may
+// not be greater than 64 and should be given as 0 on the first call.
+func validSingle(s string, depth int) (err error, rem string) {
+ if s == "" {
+ return SignatureError{Sig: s, Reason: "empty signature"}, ""
+ }
+ if depth > 64 {
+ return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
+ }
+ switch s[0] {
+ case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
+ return nil, s[1:]
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ i := findMatching(s[1:], '{', '}')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
+ }
+ i++
+ rem = s[i+1:]
+ s = s[2:i]
+ if err, _ = validSingle(s[:1], depth+1); err != nil {
+ return err, ""
+ }
+ err, nr := validSingle(s[1:], depth+1)
+ if err != nil {
+ return err, ""
+ }
+ if nr != "" {
+ return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
+ }
+ return nil, rem
+ }
+ return validSingle(s[1:], depth+1)
+ case '(':
+ i := findMatching(s, '(', ')')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
+ }
+ rem = s[i+1:]
+ s = s[1:i]
+ for err == nil && s != "" {
+ err, s = validSingle(s, depth+1)
+ }
+ if err != nil {
+ rem = ""
+ }
+ return
+ }
+ return SignatureError{Sig: s, Reason: "invalid type character"}, ""
+}
+
+func findMatching(s string, left, right rune) int {
+ n := 0
+ for i, v := range s {
+ if v == left {
+ n++
+ } else if v == right {
+ n--
+ }
+ if n == 0 {
+ return i
+ }
+ }
+ return -1
+}
+
+// typeFor returns the type of the given signature. It ignores any leftover
+// characters and panics if s doesn't start with a valid type signature.
+func typeFor(s string) (t reflect.Type) {
+ err, _ := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ if t, ok := sigToType[s[0]]; ok {
+ return t
+ }
+ switch s[0] {
+ case 'a':
+ if s[1] == '{' {
+ i := strings.LastIndex(s, "}")
+ t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
+ } else {
+ t = reflect.SliceOf(typeFor(s[1:]))
+ }
+ case '(':
+ t = interfacesType
+ }
+ return
+}
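
A quick sketch of the exported surface defined above; the expected outputs
follow directly from getSignature (struct fields concatenate inside
parentheses, maps become dict entries, slices gain an 'a' prefix) and from
validSingle's error reporting:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/godbus/dbus"
    )

    type pair struct {
        Name string
        ID   uint32
    }

    func main() {
        fmt.Println(dbus.SignatureOf(pair{}))                       // (su)
        fmt.Println(dbus.SignatureOf(map[string][]int32{}))         // a{sai}
        fmt.Println(dbus.SignatureOfType(reflect.TypeOf(int64(0)))) // x

        // ParseSignature reports malformed input instead of panicking:
        if _, err := dbus.ParseSignature("a{s"); err != nil {
            fmt.Println(err) // dbus: invalid signature: "a{s" (unmatched '{')
        }
    }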
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_darwin.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_darwin.go
new file mode 100644
index 0000000..1bba0d6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_darwin.go
@@ -0,0 +1,6 @@
+package dbus
+
+func (t *unixTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_generic.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_generic.go
new file mode 100644
index 0000000..46f8f49
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_generic.go
@@ -0,0 +1,35 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+type genericTransport struct {
+ io.ReadWriteCloser
+}
+
+func (t genericTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
+
+func (t genericTransport) SupportsUnixFDs() bool {
+ return false
+}
+
+func (t genericTransport) EnableUnixFDs() {}
+
+func (t genericTransport) ReadMessage() (*Message, error) {
+ return DecodeMessage(t)
+}
+
+func (t genericTransport) SendMessage(msg *Message) error {
+ for _, v := range msg.Body {
+ if _, ok := v.(UnixFD); ok {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ }
+ return msg.EncodeTo(t, binary.LittleEndian)
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go
new file mode 100644
index 0000000..3fafeab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go
@@ -0,0 +1,196 @@
+//+build !windows
+
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+type oobReader struct {
+ conn *net.UnixConn
+ oob []byte
+ buf [4096]byte
+}
+
+func (o *oobReader) Read(b []byte) (n int, err error) {
+ n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
+ if err != nil {
+ return n, err
+ }
+ if flags&syscall.MSG_CTRUNC != 0 {
+ return n, errors.New("dbus: control data truncated (too many fds received)")
+ }
+ o.oob = append(o.oob, o.buf[:oobn]...)
+ return n, nil
+}
+
+type unixTransport struct {
+ *net.UnixConn
+ hasUnixFDs bool
+}
+
+func newUnixTransport(keys string) (transport, error) {
+ var err error
+
+ t := new(unixTransport)
+ abstract := getKey(keys, "abstract")
+ path := getKey(keys, "path")
+ switch {
+ case abstract == "" && path == "":
+ return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
+ case abstract != "" && path == "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ case abstract == "" && path != "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ default:
+ return nil, errors.New("dbus: invalid address (both path and abstract set)")
+ }
+}
+
+func init() {
+ transports["unix"] = newUnixTransport
+}
+
+func (t *unixTransport) EnableUnixFDs() {
+ t.hasUnixFDs = true
+}
+
+func (t *unixTransport) ReadMessage() (*Message, error) {
+ var (
+ blen, hlen uint32
+ csheader [16]byte
+ headers []header
+ order binary.ByteOrder
+ unixfds uint32
+ )
+ // To be sure that all bytes of out-of-band data are read, we use a special
+ // reader that uses ReadMsgUnix on the underlying connection instead of Read
+ // and gathers the out-of-band data in a buffer.
+ rd := &oobReader{conn: t.UnixConn}
+ // read the first 16 bytes (the part of the header that has a constant size),
+ // from which we can figure out the length of the rest of the message
+ if _, err := io.ReadFull(rd, csheader[:]); err != nil {
+ return nil, err
+ }
+ switch csheader[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+ // csheader[4:8] -> length of message body, csheader[12:16] -> length of
+ // header fields (without alignment)
+ binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
+ binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
+ if hlen%8 != 0 {
+ hlen += 8 - (hlen % 8)
+ }
+
+ // decode headers and look for unix fds
+ headerdata := make([]byte, hlen+4)
+ copy(headerdata, csheader[12:])
+ if _, err := io.ReadFull(rd, headerdata[4:]); err != nil {
+ return nil, err
+ }
+ dec := newDecoder(bytes.NewBuffer(headerdata), order)
+ dec.pos = 12
+ vs, err := dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ Store(vs, &headers)
+ for _, v := range headers {
+ if v.Field == byte(FieldUnixFDs) {
+ unixfds, _ = v.Variant.value.(uint32)
+ }
+ }
+ all := make([]byte, 16+hlen+blen)
+ copy(all, csheader[:])
+ copy(all[16:], headerdata[4:])
+ if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
+ return nil, err
+ }
+ if unixfds != 0 {
+ if !t.hasUnixFDs {
+ return nil, errors.New("dbus: got unix fds on unsupported transport")
+ }
+ // read the fds from the OOB data
+ scms, err := syscall.ParseSocketControlMessage(rd.oob)
+ if err != nil {
+ return nil, err
+ }
+ if len(scms) != 1 {
+ return nil, errors.New("dbus: received more than one socket control message")
+ }
+ fds, err := syscall.ParseUnixRights(&scms[0])
+ if err != nil {
+ return nil, err
+ }
+ msg, err := DecodeMessage(bytes.NewBuffer(all))
+ if err != nil {
+ return nil, err
+ }
+ // substitute the values in the message body (which are indices into the
+ // array of fds received via OOB) with the actual values
+ for i, v := range msg.Body {
+ if j, ok := v.(UnixFDIndex); ok {
+ if uint32(j) >= unixfds {
+ return nil, InvalidMessageError("invalid index for unix fd")
+ }
+ msg.Body[i] = UnixFD(fds[j])
+ }
+ }
+ return msg, nil
+ }
+ return DecodeMessage(bytes.NewBuffer(all))
+}
+
+func (t *unixTransport) SendMessage(msg *Message) error {
+ fds := make([]int, 0)
+ for i, v := range msg.Body {
+ if fd, ok := v.(UnixFD); ok {
+ msg.Body[i] = UnixFDIndex(len(fds))
+ fds = append(fds, int(fd))
+ }
+ }
+ if len(fds) != 0 {
+ if !t.hasUnixFDs {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
+ oob := syscall.UnixRights(fds...)
+ buf := new(bytes.Buffer)
+ msg.EncodeTo(buf, binary.LittleEndian)
+ n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
+ if err != nil {
+ return err
+ }
+ if n != buf.Len() || oobn != len(oob) {
+ return io.ErrShortWrite
+ }
+ } else {
+ if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *unixTransport) SupportsUnixFDs() bool {
+ return true
+}
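
The framing arithmetic in ReadMessage above is worth isolating: the fixed
header is 16 bytes, the header-field array length (hlen, from csheader[12:16])
is padded to the next 8-byte boundary, and only then do the blen body bytes
(from csheader[4:8]) follow. A sketch of the same computation:

    package main

    import "fmt"

    // messageSize mirrors ReadMessage's rounding: it returns the total
    // number of bytes the transport reads for one message.
    func messageSize(hlen, blen uint32) uint32 {
        if hlen%8 != 0 {
            hlen += 8 - (hlen % 8) // pad header fields to an 8-byte boundary
        }
        return 16 + hlen + blen
    }

    func main() {
        // A 61-byte header-field array occupies 64 bytes on the wire,
        // so a 4-byte body yields 16+64+4 = 84 bytes in total.
        fmt.Println(messageSize(61, 4)) // 84
    }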
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go
new file mode 100644
index 0000000..a8cd393
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go
@@ -0,0 +1,95 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for DragonFly BSD
+
+package dbus
+
+/*
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// http://golang.org/src/pkg/syscall/types_dragonfly.go
+// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
+const (
+ SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+ // From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+ //salign := sizeofPtr
+ // NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
+ // still require 32-bit aligned access to network subsystem.
+ //if darwin64Bit || dragonfly64Bit {
+ // salign = 4
+ //}
+ salign := 4
+ return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte {
+ b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+ h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = syscall.SOL_SOCKET
+ h.Type = syscall.SCM_CREDS
+ h.SetLen(syscall.CmsgLen(SizeofUcred))
+ *((*Ucred)(cmsgData(h))) = *ucred
+ return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
+ if m.Header.Level != syscall.SOL_SOCKET {
+ return nil, syscall.EINVAL
+ }
+ if m.Header.Type != syscall.SCM_CREDS {
+ return nil, syscall.EINVAL
+ }
+ ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
+ return &ucred, nil
+}
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
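
The bit trick in cmsgAlignOf rounds salen up to the next multiple of salign
(hard-coded to 4 above for DragonFly). A two-line sketch of the same
computation:

    package main

    import "fmt"

    // align mirrors cmsgAlignOf with salign fixed at 4.
    func align(salen int) int {
        return (salen + 3) &^ 3
    }

    func main() {
        fmt.Println(align(5), align(8)) // 8 8
    }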
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_linux.go b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_linux.go
new file mode 100644
index 0000000..d9dfdf6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_linux.go
@@ -0,0 +1,25 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+package dbus
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := syscall.UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/variant.go b/Godeps/_workspace/src/github.com/godbus/dbus/variant.go
new file mode 100644
index 0000000..b7b13ae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/variant.go
@@ -0,0 +1,139 @@
+package dbus
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Variant represents the D-Bus variant type.
+type Variant struct {
+ sig Signature
+ value interface{}
+}
+
+// MakeVariant converts the given value to a Variant. It panics if v cannot be
+// represented as a D-Bus type.
+func MakeVariant(v interface{}) Variant {
+ return Variant{SignatureOf(v), v}
+}
+
+// ParseVariant parses the given string as a variant as described at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
+// empty, it is taken to be the expected signature for the variant.
+func ParseVariant(s string, sig Signature) (Variant, error) {
+ tokens := varLex(s)
+ p := &varParser{tokens: tokens}
+ n, err := varMakeNode(p)
+ if err != nil {
+ return Variant{}, err
+ }
+ if sig.str == "" {
+ sig, err = varInfer(n)
+ if err != nil {
+ return Variant{}, err
+ }
+ }
+ v, err := n.Value(sig)
+ if err != nil {
+ return Variant{}, err
+ }
+ return MakeVariant(v), nil
+}
+
+// format returns a formatted version of v and whether this string can be parsed
+// unambiguously.
+func (v Variant) format() (string, bool) {
+ switch v.sig.str[0] {
+ case 'b', 'i':
+ return fmt.Sprint(v.value), true
+ case 'n', 'q', 'u', 'x', 't', 'd', 'h':
+ return fmt.Sprint(v.value), false
+ case 's':
+ return strconv.Quote(v.value.(string)), true
+ case 'o':
+ return strconv.Quote(string(v.value.(ObjectPath))), false
+ case 'g':
+ return strconv.Quote(v.value.(Signature).str), false
+ case 'v':
+ s, unamb := v.value.(Variant).format()
+ if !unamb {
+ return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
+ }
+ return "<" + s + ">", true
+ case 'y':
+ return fmt.Sprintf("%#x", v.value.(byte)), false
+ }
+ rv := reflect.ValueOf(v.value)
+ switch rv.Kind() {
+ case reflect.Slice:
+ if rv.Len() == 0 {
+ return "[]", false
+ }
+ unamb := true
+ buf := bytes.NewBuffer([]byte("["))
+ for i := 0; i < rv.Len(); i++ {
+ // TODO: slooow
+ s, b := MakeVariant(rv.Index(i).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ if i != rv.Len()-1 {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String(), unamb
+ case reflect.Map:
+ if rv.Len() == 0 {
+ return "{}", false
+ }
+ unamb := true
+ var buf bytes.Buffer
+ kvs := make([]string, rv.Len())
+ for i, k := range rv.MapKeys() {
+ s, b := MakeVariant(k.Interface()).format()
+ unamb = unamb && b
+ buf.Reset()
+ buf.WriteString(s)
+ buf.WriteString(": ")
+ s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ kvs[i] = buf.String()
+ }
+ buf.Reset()
+ buf.WriteByte('{')
+ sort.Strings(kvs)
+ for i, kv := range kvs {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(kv)
+ }
+ buf.WriteByte('}')
+ return buf.String(), unamb
+ }
+ return `"INVALID"`, true
+}
+
+// Signature returns the D-Bus signature of the underlying value of v.
+func (v Variant) Signature() Signature {
+ return v.sig
+}
+
+// String returns the string representation of the underlying value of v as
+// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
+func (v Variant) String() string {
+ s, unamb := v.format()
+ if !unamb {
+ return "@" + v.sig.str + " " + s
+ }
+ return s
+}
+
+// Value returns the underlying value of v.
+func (v Variant) Value() interface{} {
+ return v.value
+}
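
A round trip through the Variant API defined above; the "@q" prefix in the
output is the disambiguation that format reports for most numeric types:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        v := dbus.MakeVariant(uint16(42))
        fmt.Println(v.Signature()) // q
        fmt.Println(v)             // @q 42

        // With an empty signature, ParseVariant infers the type from the
        // literal; integers without '.' or 'e' default to int32.
        w, err := dbus.ParseVariant("[1, 2, 3]", dbus.Signature{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", w.Value()) // []int32
    }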
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/variant_lexer.go b/Godeps/_workspace/src/github.com/godbus/dbus/variant_lexer.go
new file mode 100644
index 0000000..332007d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/variant_lexer.go
@@ -0,0 +1,284 @@
+package dbus
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Heavily inspired by the lexer from text/template.
+
+type varToken struct {
+ typ varTokenType
+ val string
+}
+
+type varTokenType byte
+
+const (
+ tokEOF varTokenType = iota
+ tokError
+ tokNumber
+ tokString
+ tokBool
+ tokArrayStart
+ tokArrayEnd
+ tokDictStart
+ tokDictEnd
+ tokVariantStart
+ tokVariantEnd
+ tokComma
+ tokColon
+ tokType
+ tokByteString
+)
+
+type varLexer struct {
+ input string
+ start int
+ pos int
+ width int
+ tokens []varToken
+}
+
+type lexState func(*varLexer) lexState
+
+func varLex(s string) []varToken {
+ l := &varLexer{input: s}
+ l.run()
+ return l.tokens
+}
+
+func (l *varLexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *varLexer) backup() {
+ l.pos -= l.width
+}
+
+func (l *varLexer) emit(t varTokenType) {
+ l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
+ l.start = l.pos
+}
+
+func (l *varLexer) errorf(format string, v ...interface{}) lexState {
+ l.tokens = append(l.tokens, varToken{
+ tokError,
+ fmt.Sprintf(format, v...),
+ })
+ return nil
+}
+
+func (l *varLexer) ignore() {
+ l.start = l.pos
+}
+
+func (l *varLexer) next() rune {
+ var r rune
+
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return -1
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+func (l *varLexer) run() {
+ for state := varLexNormal; state != nil; {
+ state = state(l)
+ }
+}
+
+func (l *varLexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func varLexNormal(l *varLexer) lexState {
+ for {
+ r := l.next()
+ switch {
+ case r == -1:
+ l.emit(tokEOF)
+ return nil
+ case r == '[':
+ l.emit(tokArrayStart)
+ case r == ']':
+ l.emit(tokArrayEnd)
+ case r == '{':
+ l.emit(tokDictStart)
+ case r == '}':
+ l.emit(tokDictEnd)
+ case r == '<':
+ l.emit(tokVariantStart)
+ case r == '>':
+ l.emit(tokVariantEnd)
+ case r == ':':
+ l.emit(tokColon)
+ case r == ',':
+ l.emit(tokComma)
+ case r == '\'' || r == '"':
+ l.backup()
+ return varLexString
+ case r == '@':
+ l.backup()
+ return varLexType
+ case unicode.IsSpace(r):
+ l.ignore()
+ case unicode.IsNumber(r) || r == '+' || r == '-':
+ l.backup()
+ return varLexNumber
+ case r == 'b':
+ pos := l.start
+ if n := l.peek(); n == '"' || n == '\'' {
+ return varLexByteString
+ }
+ // not a byte string; try to parse it as a type or bool below
+ l.pos = pos + 1
+ l.width = 1
+ fallthrough
+ default:
+ // either a bool or a type. Try bools first.
+ l.backup()
+ if l.pos+4 <= len(l.input) {
+ if l.input[l.pos:l.pos+4] == "true" {
+ l.pos += 4
+ l.emit(tokBool)
+ continue
+ }
+ }
+ if l.pos+5 <= len(l.input) {
+ if l.input[l.pos:l.pos+5] == "false" {
+ l.pos += 5
+ l.emit(tokBool)
+ continue
+ }
+ }
+ // must be a type.
+ return varLexType
+ }
+ }
+}
+
+var varTypeMap = map[string]string{
+ "boolean": "b",
+ "byte": "y",
+ "int16": "n",
+ "uint16": "q",
+ "int32": "i",
+ "uint32": "u",
+ "int64": "x",
+ "uint64": "t",
+ "double": "f",
+ "string": "s",
+ "objectpath": "o",
+ "signature": "g",
+}
+
+func varLexByteString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated bytestring")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokByteString)
+ return varLexNormal
+}
+
+func varLexNumber(l *varLexer) lexState {
+ l.accept("+-")
+ digits := "0123456789"
+ if l.accept("0") {
+ if l.accept("x") {
+ digits = "0123456789abcdefABCDEF"
+ } else {
+ digits = "01234567"
+ }
+ }
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ if l.accept(".") {
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ for strings.IndexRune("0123456789", l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if r := l.peek(); unicode.IsLetter(r) {
+ l.next()
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokNumber)
+ return varLexNormal
+}
+
+func varLexString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated string")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokString)
+ return varLexNormal
+}
+
+func varLexType(l *varLexer) lexState {
+ at := l.accept("@")
+ for {
+ r := l.next()
+ if r == -1 {
+ break
+ }
+ if unicode.IsSpace(r) {
+ l.backup()
+ break
+ }
+ }
+ if at {
+ if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
+ return l.errorf("%s", err)
+ }
+ } else {
+ if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
+ l.emit(tokType)
+ return varLexNormal
+ }
+ return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokType)
+ return varLexNormal
+}
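
The lexer itself is unexported, but its token forms are observable through
ParseVariant. Here are three of the special cases handled above: the hex
branch of varLexNumber, @-type annotations from varLexType, and b-prefixed
bytestrings, which the parser NUL-terminates:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        v, _ := dbus.ParseVariant("0x10", dbus.Signature{})
        fmt.Println(v.Value()) // 16 (the "0x" prefix switches the base)

        v, _ = dbus.ParseVariant("@y 200", dbus.Signature{})
        fmt.Printf("%T\n", v.Value()) // uint8, fixed by the @y annotation

        v, _ = dbus.ParseVariant(`b"hi\n"`, dbus.Signature{})
        fmt.Printf("%q\n", v.Value()) // "hi\n\x00"
    }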
diff --git a/Godeps/_workspace/src/github.com/godbus/dbus/variant_parser.go b/Godeps/_workspace/src/github.com/godbus/dbus/variant_parser.go
new file mode 100644
index 0000000..d20f5da
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/godbus/dbus/variant_parser.go
@@ -0,0 +1,817 @@
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type varParser struct {
+ tokens []varToken
+ i int
+}
+
+func (p *varParser) backup() {
+ p.i--
+}
+
+func (p *varParser) next() varToken {
+ if p.i < len(p.tokens) {
+ t := p.tokens[p.i]
+ p.i++
+ return t
+ }
+ return varToken{typ: tokEOF}
+}
+
+type varNode interface {
+ Infer() (Signature, error)
+ String() string
+ Sigs() sigSet
+ Value(Signature) (interface{}, error)
+}
+
+func varMakeNode(p *varParser) (varNode, error) {
+ var sig Signature
+
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokNumber:
+ return varMakeNumNode(t, sig)
+ case tokString:
+ return varMakeStringNode(t, sig)
+ case tokBool:
+ if sig.str != "" && sig.str != "b" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := strconv.ParseBool(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return boolNode(b), nil
+ case tokArrayStart:
+ return varMakeArrayNode(p, sig)
+ case tokVariantStart:
+ return varMakeVariantNode(p, sig)
+ case tokDictStart:
+ return varMakeDictNode(p, sig)
+ case tokType:
+ if sig.str != "" {
+ return nil, errors.New("unexpected type annotation")
+ }
+ if t.val[0] == '@' {
+ sig.str = t.val[1:]
+ } else {
+ sig.str = varTypeMap[t.val]
+ }
+ case tokByteString:
+ if sig.str != "" && sig.str != "ay" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := varParseByteString(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return byteStringNode(b), nil
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+}
+
+type varTypeError struct {
+ val string
+ sig Signature
+}
+
+func (e varTypeError) Error() string {
+ return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
+}
+
+type sigSet map[Signature]bool
+
+func (s sigSet) Empty() bool {
+ return len(s) == 0
+}
+
+func (s sigSet) Intersect(s2 sigSet) sigSet {
+ r := make(sigSet)
+ for k := range s {
+ if s2[k] {
+ r[k] = true
+ }
+ }
+ return r
+}
+
+func (s sigSet) Single() (Signature, bool) {
+ if len(s) == 1 {
+ for k := range s {
+ return k, true
+ }
+ }
+ return Signature{}, false
+}
+
+func (s sigSet) ToArray() sigSet {
+ r := make(sigSet, len(s))
+ for k := range s {
+ r[Signature{"a" + k.str}] = true
+ }
+ return r
+}
+
+type numNode struct {
+ sig Signature
+ str string
+ val interface{}
+}
+
+var numSigSet = sigSet{
+ Signature{"y"}: true,
+ Signature{"n"}: true,
+ Signature{"q"}: true,
+ Signature{"i"}: true,
+ Signature{"u"}: true,
+ Signature{"x"}: true,
+ Signature{"t"}: true,
+ Signature{"d"}: true,
+}
+
+func (n numNode) Infer() (Signature, error) {
+ if strings.ContainsAny(n.str, ".e") {
+ return Signature{"d"}, nil
+ }
+ return Signature{"i"}, nil
+}
+
+func (n numNode) String() string {
+ return n.str
+}
+
+func (n numNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ if strings.ContainsAny(n.str, ".e") {
+ return sigSet{Signature{"d"}: true}
+ }
+ return numSigSet
+}
+
+func (n numNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ return varNumAs(n.str, sig)
+}
+
+func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str == "" {
+ return numNode{str: tok.val}, nil
+ }
+ num, err := varNumAs(tok.val, sig)
+ if err != nil {
+ return nil, err
+ }
+ return numNode{sig: sig, val: num}, nil
+}
+
+func varNumAs(s string, sig Signature) (interface{}, error) {
+ isUnsigned := false
+ size := 32
+ switch sig.str {
+ case "n":
+ size = 16
+ case "i":
+ case "x":
+ size = 64
+ case "y":
+ size = 8
+ isUnsigned = true
+ case "q":
+ size = 16
+ isUnsigned = true
+ case "u":
+ isUnsigned = true
+ case "t":
+ size = 64
+ isUnsigned = true
+ case "d":
+ d, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ default:
+ return nil, varTypeError{s, sig}
+ }
+ base := 10
+ if strings.HasPrefix(s, "0x") {
+ base = 16
+ s = s[2:]
+ }
+ if strings.HasPrefix(s, "0") && len(s) != 1 {
+ base = 8
+ s = s[1:]
+ }
+ if isUnsigned {
+ i, err := strconv.ParseUint(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "y":
+ v = byte(i)
+ case "q":
+ v = uint16(i)
+ case "u":
+ v = uint32(i)
+ }
+ return v, nil
+ }
+ i, err := strconv.ParseInt(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "n":
+ v = int16(i)
+ case "i":
+ v = int32(i)
+ }
+ return v, nil
+}
+
+type stringNode struct {
+ sig Signature
+ str string // parsed
+ val interface{} // has correct type
+}
+
+var stringSigSet = sigSet{
+ Signature{"s"}: true,
+ Signature{"g"}: true,
+ Signature{"o"}: true,
+}
+
+func (n stringNode) Infer() (Signature, error) {
+ return Signature{"s"}, nil
+}
+
+func (n stringNode) String() string {
+ return n.str
+}
+
+func (n stringNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ return stringSigSet
+}
+
+func (n stringNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ switch {
+ case sig.str == "g":
+ return Signature{n.str}, nil
+ case sig.str == "o":
+ return ObjectPath(n.str), nil
+ case sig.str == "s":
+ return n.str, nil
+ default:
+ return nil, varTypeError{n.str, sig}
+ }
+}
+
+func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
+ return nil, fmt.Errorf("invalid type %q for string", sig.str)
+ }
+ s, err := varParseString(tok.val)
+ if err != nil {
+ return nil, err
+ }
+ n := stringNode{str: s}
+ if sig.str == "" {
+ return stringNode{str: s}, nil
+ }
+ n.sig = sig
+ switch sig.str {
+ case "o":
+ n.val = ObjectPath(s)
+ case "g":
+ n.val = Signature{s}
+ case "s":
+ n.val = s
+ }
+ return n, nil
+}
+
+func varParseString(s string) (string, error) {
+ // quotes are guaranteed to be there
+ s = s[1 : len(s)-1]
+ buf := new(bytes.Buffer)
+ for len(s) != 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ if r != '\\' {
+ buf.WriteRune(r)
+ continue
+ }
+ r, size = utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ switch r {
+ case 'a':
+ buf.WriteRune(0x7)
+ case 'b':
+ buf.WriteRune(0x8)
+ case 'f':
+ buf.WriteRune(0xc)
+ case 'n':
+ buf.WriteRune('\n')
+ case 'r':
+ buf.WriteRune('\r')
+ case 't':
+ buf.WriteRune('\t')
+ case '\n':
+ case 'u':
+ if len(s) < 4 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:4], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[4:]
+ case 'U':
+ if len(s) < 8 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:8], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[8:]
+ default:
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String(), nil
+}
+
+var boolSigSet = sigSet{Signature{"b"}: true}
+
+type boolNode bool
+
+func (boolNode) Infer() (Signature, error) {
+ return Signature{"b"}, nil
+}
+
+func (b boolNode) String() string {
+ if b {
+ return "true"
+ }
+ return "false"
+}
+
+func (boolNode) Sigs() sigSet {
+ return boolSigSet
+}
+
+func (b boolNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "b" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return bool(b), nil
+}
+
+type arrayNode struct {
+ set sigSet
+ children []varNode
+ val interface{}
+}
+
+func (n arrayNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ csig, err := varInfer(v)
+ if err != nil {
+ continue
+ }
+ return Signature{"a" + csig.str}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n arrayNode) String() string {
+ s := "["
+ for i, v := range n.children {
+ s += v.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "]"
+}
+
+func (n arrayNode) Sigs() sigSet {
+ return n.set
+}
+
+func (n arrayNode) Value(sig Signature) (interface{}, error) {
+ if n.set.Empty() {
+ // no type information whatsoever, so this must be an empty slice
+ return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
+ }
+ if !n.set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
+ for i, v := range n.children {
+ rv, err := v.Value(Signature{sig.str[1:]})
+ if err != nil {
+ return nil, err
+ }
+ s.Index(i).Set(reflect.ValueOf(rv))
+ }
+ return s.Interface(), nil
+}
+
+func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
+ var n arrayNode
+ if sig.str != "" {
+ n.set = sigSet{sig: true}
+ }
+ if t := p.next(); t.typ == tokArrayEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ cn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if cset := cn.Sigs(); !cset.Empty() {
+ if n.set.Empty() {
+ n.set = cset.ToArray()
+ } else {
+ nset := cset.ToArray().Intersect(n.set)
+ if nset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
+ }
+ n.set = nset
+ }
+ }
+ n.children = append(n.children, cn)
+ switch t := p.next(); t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokArrayEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type variantNode struct {
+ n varNode
+}
+
+var variantSet = sigSet{
+ Signature{"v"}: true,
+}
+
+func (variantNode) Infer() (Signature, error) {
+ return Signature{"v"}, nil
+}
+
+func (n variantNode) String() string {
+ return "<" + n.n.String() + ">"
+}
+
+func (variantNode) Sigs() sigSet {
+ return variantSet
+}
+
+func (n variantNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "v" {
+ return nil, varTypeError{n.String(), sig}
+ }
+ sig, err := varInfer(n.n)
+ if err != nil {
+ return nil, err
+ }
+ v, err := n.n.Value(sig)
+ if err != nil {
+ return nil, err
+ }
+ return MakeVariant(v), nil
+}
+
+func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
+ n, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if t := p.next(); t.typ != tokVariantEnd {
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ vn := variantNode{n}
+ if sig.str != "" && sig.str != "v" {
+ return nil, varTypeError{vn.String(), sig}
+ }
+ return variantNode{n}, nil
+}
+
+type dictEntry struct {
+ key, val varNode
+}
+
+type dictNode struct {
+ kset, vset sigSet
+ children []dictEntry
+ val interface{}
+}
+
+func (n dictNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ ksig, err := varInfer(v.key)
+ if err != nil {
+ continue
+ }
+ vsig, err := varInfer(v.val)
+ if err != nil {
+ continue
+ }
+ return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n dictNode) String() string {
+ s := "{"
+ for i, v := range n.children {
+ s += v.key.String() + ": " + v.val.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "}"
+}
+
+func (n dictNode) Sigs() sigSet {
+ r := sigSet{}
+ for k := range n.kset {
+ for v := range n.vset {
+ sig := "a{" + k.str + v.str + "}"
+ r[Signature{sig}] = true
+ }
+ }
+ return r
+}
+
+func (n dictNode) Value(sig Signature) (interface{}, error) {
+ set := n.Sigs()
+ if set.Empty() {
+ // no type information -> empty dict
+ return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
+ }
+ if !set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ m := reflect.MakeMap(typeFor(sig.str))
+ ksig := Signature{sig.str[2:3]}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ for _, v := range n.children {
+ kv, err := v.key.Value(ksig)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := v.val.Value(vsig)
+ if err != nil {
+ return nil, err
+ }
+ m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return m.Interface(), nil
+}
+
+func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
+ var n dictNode
+
+ if sig.str != "" {
+ if len(sig.str) < 5 {
+ return nil, fmt.Errorf("invalid signature %q for dict type", sig)
+ }
+ ksig := Signature{string(sig.str[2])}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ n.kset = sigSet{ksig: true}
+ n.vset = sigSet{vsig: true}
+ }
+ if t := p.next(); t.typ == tokDictEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ kn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if kset := kn.Sigs(); !kset.Empty() {
+ if n.kset.Empty() {
+ n.kset = kset
+ } else {
+ n.kset = kset.Intersect(n.kset)
+ if n.kset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
+ }
+ }
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokColon:
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ vn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if vset := vn.Sigs(); !vset.Empty() {
+ if n.vset.Empty() {
+ n.vset = vset
+ } else {
+ n.vset = n.vset.Intersect(vset)
+ if n.vset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
+ }
+ }
+ }
+ n.children = append(n.children, dictEntry{kn, vn})
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokDictEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type byteStringNode []byte
+
+var byteStringSet = sigSet{
+ Signature{"ay"}: true,
+}
+
+func (byteStringNode) Infer() (Signature, error) {
+ return Signature{"ay"}, nil
+}
+
+func (b byteStringNode) String() string {
+ return string(b)
+}
+
+func (b byteStringNode) Sigs() sigSet {
+ return byteStringSet
+}
+
+func (b byteStringNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "ay" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return []byte(b), nil
+}
+
+func varParseByteString(s string) ([]byte, error) {
+ // quotes and b at start are guaranteed to be there
+ b := make([]byte, 0, 1)
+ s = s[2 : len(s)-1]
+ for len(s) != 0 {
+ c := s[0]
+ s = s[1:]
+ if c != '\\' {
+ b = append(b, c)
+ continue
+ }
+ c = s[0]
+ s = s[1:]
+ switch c {
+ case 'a':
+ b = append(b, 0x7)
+ case 'b':
+ b = append(b, 0x8)
+ case 'f':
+ b = append(b, 0xc)
+ case 'n':
+ b = append(b, '\n')
+ case 'r':
+ b = append(b, '\r')
+ case 't':
+ b = append(b, '\t')
+ case 'x':
+ if len(s) < 2 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:2], 16, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[2:]
+ case '0':
+ if len(s) < 3 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:3], 8, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[3:]
+ default:
+ b = append(b, c)
+ }
+ }
+ return append(b, 0), nil
+}
+
+func varInfer(n varNode) (Signature, error) {
+ if sig, ok := n.Sigs().Single(); ok {
+ return sig, nil
+ }
+ return n.Infer()
+}
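
Dict literals exercise both modes of the parser above. With an empty
signature the key and value signature sets are intersected entry by entry and
collapsed by Infer (string keys default to "s", integer values to "i"); an
explicit signature pins both sets before any entry is read:

    package main

    import (
        "fmt"

        "github.com/godbus/dbus"
    )

    func main() {
        v, err := dbus.ParseVariant(`{"a": 1, "b": 2}`, dbus.Signature{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", v.Value()) // map[string]int32

        // The same literal, parsed with uint64 values instead.
        sig, _ := dbus.ParseSignature("a{st}")
        v, err = dbus.ParseVariant(`{"a": 1, "b": 2}`, sig)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", v.Value()) // map[string]uint64
    }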
diff --git a/Godeps/_workspace/src/github.com/golang/glog/LICENSE b/Godeps/_workspace/src/github.com/golang/glog/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/golang/glog/README b/Godeps/_workspace/src/github.com/golang/glog/README
new file mode 100644
index 0000000..387b4eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/glog/README
@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation for the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog.go b/Godeps/_workspace/src/github.com/golang/glog/glog.go
new file mode 100644
index 0000000..54bd7af
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/glog/glog.go
@@ -0,0 +1,1180 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+// glog.Info("Prepare to repel boarders")
+//
+// glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+// if glog.V(2) {
+// glog.Info("Starting transaction...")
+// }
+//
+// glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+// -logtostderr=false
+// Logs are written to standard error instead of to files.
+// -alsologtostderr=false
+// Logs are written to standard error as well as to files.
+// -stderrthreshold=ERROR
+// Log events at or above this severity are logged to standard
+// error as well as to files.
+// -log_dir=""
+// Log files will be written to this directory instead of the
+// default temporary directory.
+//
+// Other flags provide aids to debugging.
+//
+// -log_backtrace_at=""
+// When set to a file and line number holding a logging statement,
+// such as
+// -log_backtrace_at=gopherflakes.go:234
+// a stack trace will be written to the Info log whenever execution
+// hits that statement. (Unlike with -vmodule, the ".go" must be
+// present.)
+// -v=0
+// Enable V-leveled logging at the specified level.
+// -vmodule=""
+// The syntax of the argument is a comma-separated list of pattern=N,
+// where pattern is a literal file name (minus the ".go" suffix) or
+// "glob" pattern and N is a V level. For instance,
+// -vmodule=gopher*=3
+// sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package glog
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ stdLog "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+ infoLog severity = iota
+ warningLog
+ errorLog
+ fatalLog
+ numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+ return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+ atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+ return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+ return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+ var threshold severity
+ // Is it a known name?
+ if v, ok := severityByName(value); ok {
+ threshold = v
+ } else {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ threshold = severity(v)
+ }
+ logging.stderrThreshold.set(threshold)
+ return nil
+}
+
+func severityByName(s string) (severity, bool) {
+ s = strings.ToUpper(s)
+ for i, name := range severityName {
+ if name == s {
+ return severity(i), true
+ }
+ }
+ return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+ lines int64
+ bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+ return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+ return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+ Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+ infoLog: &Stats.Info,
+ warningLog: &Stats.Warning,
+ errorLog: &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type Level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+ return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+ atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+ return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+ v, err := strconv.Atoi(value)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(Level(v), logging.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.Atoi(patLev[1])
+ if err != nil {
+ return errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(logging.verbosity, filter, true)
+ return nil
+}
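+
+// For example (a sketch), the equivalent of -vmodule=recordio=2,gfs*=3 can be
+// installed programmatically once the flag is registered:
+//
+//	flag.Set("vmodule", "recordio=2,gfs*=3")
+//
+// after which V(2) calls in recordio.go and V(3) calls in files matching
+// "gfs*" will log.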
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+ file string
+ line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+ return t.line > 0
+}
+
+// match reports whether the specified file and line match the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+ if t.line != line {
+ return false
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ file = file[i+1:]
+ }
+ return t.file == file
+}
+
+func (t *traceLocation) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		t.line = 0
+		t.file = ""
+		return nil
+	}
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+		return errors.New("negative or zero value for line number")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+func init() {
+ flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+ flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+ flag.Var(&logging.verbosity, "v", "log level for V logs")
+ flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ // Default stderrThreshold is ERROR.
+ logging.stderrThreshold = errorLog
+
+ logging.setVState(0, nil, false)
+ go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severity // The -stderrthreshold flag.
+
+ // freeList is a list of byte buffers, maintained under freeListMu.
+ freeList *buffer
+ // freeListMu maintains the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ freeListMu sync.Mutex
+
+ // mu protects the remaining elements of this structure and is
+ // used to synchronize logging.
+ mu sync.Mutex
+ // file holds writer for each of the log types.
+ file [numSeverity]flushSyncWriter
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+	// using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+	verbosity Level // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+ bytes.Buffer
+ tmp [64]byte // temporary byte array for creating headers.
+ next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ logging.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&logging.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ logging.vmodule.filter = filter
+ logging.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+ logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+ l.freeListMu.Lock()
+ b := l.freeList
+ if b != nil {
+ l.freeList = b.next
+ }
+ l.freeListMu.Unlock()
+ if b == nil {
+ b = new(buffer)
+ } else {
+ b.next = nil
+ b.Reset()
+ }
+ return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death.
+ return
+ }
+ l.freeListMu.Lock()
+ b.next = l.freeList
+ l.freeList = b
+ l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above this call the source line to be identified in the log message lives.
+
+Log lines have this form:
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+ L A single character, representing the log level (eg 'I' for INFO)
+ mm The month (zero padded; ie May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
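+
+For example, a line written at INFO severity might look like (values
+illustrative):
+	I0102 15:04:05.123456    1234 server.go:212] handled request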
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+ now := timeNow()
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > fatalLog {
+ s = infoLog // for safety.
+ }
+ buf := l.getBuffer()
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.tmp[0] = severityChar[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.tmp[21] = ' '
+ buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+ buf.tmp[29] = ' '
+ buf.Write(buf.tmp[:30])
+ buf.WriteString(file)
+ buf.tmp[0] = ':'
+ n := buf.someDigits(1, line)
+ buf.tmp[n+1] = ']'
+ buf.tmp[n+2] = ' '
+ buf.Write(buf.tmp[:n+3])
+ return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+ buf.tmp[i+1] = digits[d%10]
+ d /= 10
+ buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+ j := n - 1
+ for ; j >= 0 && d > 0; j-- {
+ buf.tmp[i+j] = digits[d%10]
+ d /= 10
+ }
+ for ; j >= 0; j-- {
+ buf.tmp[i+j] = pad
+ }
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+ // Print into the top, then copy down. We know there's space for at least
+ // a 10-digit number.
+ j := len(buf.tmp)
+ for {
+ j--
+ buf.tmp[j] = digits[d%10]
+ d /= 10
+ if d == 0 {
+ break
+ }
+ }
+ return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintln(buf, args...)
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+ l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ fmt.Fprintf(buf, format, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number. If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+ buf := l.formatHeader(s, file, line)
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, buf, file, line, alsoToStderr)
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+ l.mu.Lock()
+ if l.traceLocation.isSet() {
+ if l.traceLocation.match(file, line) {
+ buf.Write(stacks(false))
+ }
+ }
+ data := buf.Bytes()
+ if !flag.Parsed() {
+ os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
+ os.Stderr.Write(data)
+ } else if l.toStderr {
+ os.Stderr.Write(data)
+ } else {
+ if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+ os.Stderr.Write(data)
+ }
+ if l.file[s] == nil {
+ if err := l.createFiles(s); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+ switch s {
+ case fatalLog:
+ l.file[fatalLog].Write(data)
+ fallthrough
+ case errorLog:
+ l.file[errorLog].Write(data)
+ fallthrough
+ case warningLog:
+ l.file[warningLog].Write(data)
+ fallthrough
+ case infoLog:
+ l.file[infoLog].Write(data)
+ }
+ }
+ if s == fatalLog {
+ // If we got here via Exit rather than Fatal, print no stacks.
+ if atomic.LoadUint32(&fatalNoStacks) > 0 {
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(1)
+ }
+ // Dump all goroutine stacks before exiting.
+ // First, make sure we see the trace for the current goroutine on standard error.
+ // If -logtostderr has been specified, the loop below will do that anyway
+ // as the first stack in the full dump.
+ if !l.toStderr {
+ os.Stderr.Write(stacks(false))
+ }
+ // Write the stack trace for all goroutines to the files.
+ trace := stacks(true)
+ logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+ for log := fatalLog; log >= infoLog; log-- {
+ if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+ f.Write(trace)
+ }
+ }
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+ }
+ l.putBuffer(buf)
+ l.mu.Unlock()
+ if stats := severityStats[s]; stats != nil {
+ atomic.AddInt64(&stats.lines, 1)
+ atomic.AddInt64(&stats.bytes, int64(len(data)))
+ }
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+ done := make(chan bool, 1)
+ go func() {
+ Flush() // calls logging.lockAndFlushAll()
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+ }
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+ // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+ n := 10000
+ if all {
+ n = 100000
+ }
+ var trace []byte
+ for i := 0; i < 5; i++ {
+ trace = make([]byte, n)
+ nbytes := runtime.Stack(trace, all)
+ if nbytes < len(trace) {
+ return trace[:nbytes]
+ }
+ n *= 2
+ }
+ return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Instead, exit could be a function rather than a method but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+ fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+ // If logExitFunc is set, we do that instead of exiting.
+ if logExitFunc != nil {
+ logExitFunc(err)
+ return
+ }
+ l.flushAll()
+ os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+ logger *loggingT
+ *bufio.Writer
+ file *os.File
+ sev severity
+ nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+ return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+ if sb.nbytes+uint64(len(p)) >= MaxSize {
+ if err := sb.rotateFile(time.Now()); err != nil {
+ sb.logger.exit(err)
+ }
+ }
+ n, err = sb.Writer.Write(p)
+ sb.nbytes += uint64(n)
+ if err != nil {
+ sb.logger.exit(err)
+ }
+ return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+ if sb.file != nil {
+ sb.Flush()
+ sb.file.Close()
+ }
+ var err error
+ sb.file, _, err = create(severityName[sb.sev], now)
+ sb.nbytes = 0
+ if err != nil {
+ return err
+ }
+
+ sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+ // Write header.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+ fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+ fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+ n, err := sb.file.Write(buf.Bytes())
+ sb.nbytes += uint64(n)
+ return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+ now := time.Now()
+ // Files are created in decreasing severity order, so as soon as we find one
+ // has already been created, we can stop.
+ for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+ sb := &syncBuffer{
+ logger: l,
+ sev: s,
+ }
+ if err := sb.rotateFile(now); err != nil {
+ return err
+ }
+ l.file[s] = sb
+ }
+ return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+	for range time.NewTicker(flushInterval).C {
+ l.lockAndFlushAll()
+ }
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+ l.mu.Lock()
+ l.flushAll()
+ l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+ // Flush from fatal down, in case there's trouble flushing.
+ for s := fatalLog; s >= infoLog; s-- {
+ file := l.file[s]
+ if file != nil {
+ file.Flush() // ignore error
+ file.Sync() // ignore error
+ }
+ }
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities. Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
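+
+// For example (a sketch):
+//
+//	glog.CopyStandardLogTo("INFO")
+//	log.Print("written via the standard log package")
+//
+// routes the standard logger's output through logBridge into the INFO log.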
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity(lb), file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+// if glog.V(2) { glog.Info("log this") }
+// or
+// glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return Verbose(true)
+ }
+
+	// It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2, logging.pcs[:]) == 0 {
+ return Verbose(false)
+ }
+ v, ok := logging.vmap[logging.pcs[0]]
+ if !ok {
+ v = logging.setV(logging.pcs[0])
+ }
+ return Verbose(v >= level)
+ }
+ return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v {
+ logging.print(infoLog, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v {
+ logging.println(infoLog, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v {
+ logging.printf(infoLog, format, args...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+ logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+ logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+ logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+ logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+ logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+ logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+ logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+ logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printf(fatalLog, format, args...)
+}
diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog_file.go b/Godeps/_workspace/src/github.com/golang/glog/glog_file.go
new file mode 100644
index 0000000..65075d2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/glog/glog_file.go
@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
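+
+// A program may adjust this before logging begins, e.g. (a sketch):
+//
+//	glog.MaxSize = 64 * 1024 * 1024 // rotate log files at 64 MiB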
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+ if *logDir != "" {
+ logDirs = append(logDirs, *logDir)
+ }
+ logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+ pid = os.Getpid()
+ program = filepath.Base(os.Args[0])
+ host = "unknownhost"
+ userName = "unknownuser"
+)
+
+func init() {
+ h, err := os.Hostname()
+ if err == nil {
+ host = shortHostname(h)
+ }
+
+ current, err := user.Current()
+ if err == nil {
+ userName = current.Username
+ }
+
+ // Sanitize userName since it may contain filepath separators on Windows.
+ userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+ if i := strings.Index(hostname, "."); i >= 0 {
+ return hostname[:i]
+ }
+ return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+ name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+ program,
+ host,
+ userName,
+ tag,
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ t.Second(),
+ pid)
+ return name, program + "." + tag
+}
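+
+// For a program "wiki" on host "web1" run by user "alice", logName("INFO", t)
+// returns a name like (timestamp and pid illustrative)
+//
+//	wiki.web1.alice.log.INFO.20160102-150405.1234
+//
+// and the symlink name "wiki.INFO".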
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+ onceLogDirs.Do(createLogDirs)
+ if len(logDirs) == 0 {
+ return nil, "", errors.New("log: no log dirs")
+ }
+ name, link := logName(tag, t)
+ var lastErr error
+ for _, dir := range logDirs {
+ fname := filepath.Join(dir, name)
+ f, err := os.Create(fname)
+ if err == nil {
+ symlink := filepath.Join(dir, link)
+ os.Remove(symlink) // ignore err
+ os.Symlink(name, symlink) // ignore err
+ return f, fname, nil
+ }
+ lastErr = err
+ }
+ return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE b/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000..e2e0651
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+ make
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..e98ddec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,223 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
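+
+// For example (a sketch with a hypothetical generated message Page holding a
+// repeated string field Tags):
+//
+//	dst := &Page{Tags: []string{"a"}}
+//	src := &Page{Tags: []string{"b"}}
+//	proto.Merge(dst, src) // dst.Tags == []string{"a", "b"}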
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..5810782
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,867 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
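+
+// For example:
+//
+//	x, n := DecodeVarint([]byte{0xAC, 0x02}) // x == 300, n == 2
+//
+// The low seven bits of each byte accumulate little-endian
+// (0x2C | 0x02<<7 == 300), and the cleared high bit of 0x02 ends the varint.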
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
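+
+// Zigzag decoding maps the wire values 0, 1, 2, 3, ... back to
+// 0, -1, 1, -2, ..., so integers of small magnitude stay short on the wire
+// regardless of sign.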
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+		// TODO: check whether more callers can use alloc=false.
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
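+
+// To make the packed layout concrete (a sketch of the standard wire
+// format, following the protobuf encoding docs): a packed repeated int32
+// field with field number 4 holding [3, 270, 86942] is encoded as
+//
+//    0x22            // tag: field 4, wire type 2 (length-delimited)
+//    0x06            // payload length: 6 bytes
+//    0x03            // varint 3
+//    0x8e 0x02       // varint 270
+//    0x9e 0xa7 0x05  // varint 86942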
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // The tagcodes for the key and value properties are always a single
+ // byte, because their field numbers are 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..231b074
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1325 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
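+
+// As an illustrative check (a sketch, not part of the upstream package):
+// 300 is 0b10_0101100, so the low seven bits are emitted first with the
+// continuation bit set, giving [0xac, 0x02].
+func exampleEncodeVarint() {
+ fmt.Printf("% x\n", EncodeVarint(300)) // ac 02
+}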
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
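+
+// ZigZag interleaves negative and positive values so that numbers of small
+// magnitude stay small on the wire: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, and
+// so on. An illustrative check (a sketch, not part of the upstream package):
+func exampleZigzag() {
+ var b Buffer
+ b.EncodeZigzag64(1)          // 1 -> varint 2
+ b.EncodeZigzag32(0xfffffffe) // -2 in 32-bit two's complement -> varint 3
+ fmt.Printf("% x\n", b.buf) // 02 03
+}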
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
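+
+// Raw bytes and strings share the same length-delimited layout. An
+// illustrative check (a sketch, not part of the upstream package):
+func exampleEncodeStringBytes() {
+ var b Buffer
+ b.EncodeStringBytes("hi")
+ fmt.Printf("% x\n", b.buf) // 02 68 69: length 2, then 'h', 'i'
+}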
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
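+
+// A minimal usage sketch from client code, assuming a hypothetical
+// generated message type *pb.Test with a string field Label:
+//
+//    msg := &pb.Test{Label: proto.String("hi")}
+//    data, err := proto.Marshal(msg)
+//    if err != nil {
+//        // handle the error
+//    }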
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but only interface, map,
+// pointer, and slice values can actually be nil.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
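+
+// To make the MapFieldEntry layout above concrete (a sketch following the
+// wire format, not extra behavior): a map<int32, string> field with field
+// number 1 holding {2: "hi"} is encoded as one entry message:
+//
+//    0x0a 0x06          // field 1, wire type 2; entry is 6 bytes
+//    0x08 0x02          // key   = 2    (field 1, varint)
+//    0x12 0x02 'h' 'i'  // value = "hi" (field 2, length-delimited)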
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns addressable scratch values for a map's key and
+// value types, plus structPointers for them suitable for passing to an
+// encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr())
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr())
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
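+
+// For example (an illustrative walk-through of the code above): four zero
+// bytes are reserved, a 5-byte message is encoded after them, the length
+// varint turns out to need only one byte, so the message is copied three
+// bytes left, the buffer is trimmed, and the varint 0x05 is written into
+// the reclaimed space.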
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f5db1de
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,276 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extension sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field)
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
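+
+// A minimal usage sketch from client code, assuming a hypothetical
+// generated type *pb.Test with a string field Label:
+//
+//    a := &pb.Test{Label: proto.String("x")}
+//    b := &pb.Test{Label: proto.String("x")}
+//    fmt.Println(proto.Equal(a, b)) // true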
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
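+
+// Example (illustrative sketch): Equal, defined above, compares messages
+// field by field, so two independently constructed messages compare equal.
+// pb.Test stands in for any generated message type:
+//
+//	a := &pb.Test{Label: proto.String("x")}
+//	b := &pb.Test{Label: proto.String("x")}
+//	same := proto.Equal(a, b) // true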
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..054f4f1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,399 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
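+
+// Generated messages that declare extension ranges satisfy this interface
+// with methods of roughly the following shape (MyMessage is a hypothetical
+// generated type):
+//
+//	func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
+//		return []proto.ExtensionRange{{Start: 100, End: 536870911}}
+//	}
+//	func (m *MyMessage) ExtensionMap() map[int32]proto.Extension {
+//		return m.XXX_extensions
+//	}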
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
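+
+// Example (sketch), using hypothetical generated names pb.MyMessage and
+// pb.E_MyExt for a string-valued extension:
+//
+//	v, err := proto.GetExtension(msg, pb.E_MyExt)
+//	switch {
+//	case err == proto.ErrMissingExtension:
+//		// extension not set and no default declared
+//	case err == nil:
+//		s := v.(*string) // the dynamic type matches pb.E_MyExt.ExtensionType
+//		_ = s
+//	}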
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr; we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, a pointer to a basic type, or a slice.
+ // Allocate a "field" to hold the pointer/slice itself, and pass
+ // the address of that field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
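+
+// Example (sketch), with hypothetical descriptors pb.E_A and pb.E_B;
+// an absent extension with no default yields a nil element, not an error:
+//
+//	exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{pb.E_A, pb.E_B})
+//	// err == nil even if pb.E_B is absent; exts[1] will simply be nil.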
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
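+
+// Example (sketch): a set/query/clear round trip with a hypothetical
+// string-valued extension pb.E_MyExt:
+//
+//	if err := proto.SetExtension(msg, pb.E_MyExt, proto.String("hello")); err != nil {
+//		// wrong value type, or field number outside the declared ranges
+//	}
+//	_ = proto.HasExtension(msg, pb.E_MyExt) // true
+//	proto.ClearExtension(msg, pb.E_MyExt)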
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
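+
+// Generated code typically registers each descriptor in an init function,
+// e.g. (E_MyExt being a generated *ExtensionDesc):
+//
+//	func init() {
+//		proto.RegisterExtension(E_MyExt)
+//	}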
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..0de8f8d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,894 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
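+
+// Example (sketch): reusing a single Buffer across many messages to
+// amortize allocations; Buffer.Marshal is defined in encode.go:
+//
+//	var buf proto.Buffer
+//	for _, m := range msgs {
+//		buf.Reset()
+//		if err := buf.Marshal(m); err != nil {
+//			// handle err
+//		}
+//		send(buf.Bytes()) // send is a stand-in for the caller's own I/O
+//	}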
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
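+
+// Example (sketch): dumping an encoded message for inspection:
+//
+//	data, _ := proto.Marshal(msg)
+//	new(proto.Buffer).DebugPrint("after Marshal", data)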
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
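+
+// Example (sketch), using the Test message from the package documentation,
+// whose "type" field declares default=77:
+//
+//	t := &pb.Test{Label: proto.String("x")}
+//	proto.SetDefaults(t)
+//	// *t.Type == 77 (Default_Test_Type); t.Label is untouched.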
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a defaultMessage
+ // recording which of its scalar fields have proto-declared default values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
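+
+// Example (sketch): how an encoder can walk a map field deterministically:
+//
+//	keys := v.MapKeys() // v is the reflect.Value of the map field
+//	sort.Sort(mapKeys(keys))
+//	for _, k := range keys {
+//		// encode k and v.MapIndex(k)
+//	}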
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..e25e01e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
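+// skipVarint returns buf with its leading varint dropped. For example,
+// skipVarint([]byte{0x96, 0x01, 0xAA}) returns []byte{0xAA}, since
+// 0x96 0x01 is the two-byte varint encoding of 150.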
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ first := true
+ for _, id := range ids {
+ ext := m[id]
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ // Skip before writing the separator so we don't emit a dangling comma.
+ continue
+ }
+ if !first {
+ b.WriteByte(',')
+ }
+ first = false
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..749919d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..e9be0fe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..4fe2ec2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,846 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for computing the properties used when encoding and decoding
+ * protocol buffer struct fields.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
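+
+// On the wire, every field is preceded by a varint key of the form
+// (tag<<3 | wiretype). For example (illustrative), tag 1 with WireVarint
+// encodes as the single key byte 0x08, and tag 16 with WireBytes as the
+// two bytes 0x82 0x01.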
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
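+
+// Illustrative sketch of the two paths: small tag numbers use the dense
+// slice, and tags at or above tagMapFastLimit fall back to the map.
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // stored as fastTags[3] = 0
+//	tm.put(2048, 7)     // 2048 >= tagMapFastLimit, stored in slowTags
+//	fi, ok := tm.get(3) // fi == 0, ok == true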
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
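+
+// For example (illustrative), a Properties parsed from the tag
+// "varint,2,opt,name=id,json=id" is formatted back as
+// "varint,2,opt,name=id".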
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // this breaks a def= value containing commas, but that case is handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
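+
+// Illustrative sketch: parsing the tag "bytes,49,opt,name=foo,def=hello!"
+// yields Wire="bytes", WireType=WireBytes, Tag=49, Optional=true,
+// OrigName="foo", HasDefault=true and Default="hello!".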
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %v = []%v\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %v -> %v -> %v\n", t1, t2, t3)
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %v -> %v -> %v\n", t1, t2, t2.Elem())
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
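+ // Worked example (illustrative): Tag=49 with WireBytes gives
+ // x = 49<<3|2 = 394, which encodes as the varint bytes 0x8A 0x03,
+ // so tagcode is two bytes long.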
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
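+
+// GetProperties takes only the read lock on the common cache-hit path and
+// upgrades to the write lock on a miss; getPropertiesLocked then re-checks
+// the map, since another goroutine may have populated it between the
+// RUnlock and the Lock.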
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// propByIndex returns the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// getbase returns the reflect type of the pointer in pb and the address of the underlying struct.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..37c9535
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,849 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// isprint reports whether c is a printable ASCII character, equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
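+
+// Illustrative examples: writeString renders the input `say "hi"` as
+// "say \"hi\"", a newline byte as \n, and the non-printable byte 0x01 as
+// the octal escape \001.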
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m := ep.ExtensionMap()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..b5e1c8e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,871 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
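+// As an illustrative example (the field names are hypothetical), given a
+// message with a string field "name" and an int32 field "id", the call
+// UnmarshalText(`name:"x" id:42`, pb) sets both fields on pb.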
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..824bf2e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,14 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Sebastien Binet
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..9f54f21
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,36 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Russ Cox
+Sebastien Binet
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/README b/Godeps/_workspace/src/github.com/golang/snappy/README
new file mode 100644
index 0000000..5074bba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/README
@@ -0,0 +1,7 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
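+A minimal sketch of a block-format round trip, using this package's
+Encode and Decode functions:
+
+	package main
+
+	import (
+		"fmt"
+
+		"github.com/golang/snappy"
+	)
+
+	func main() {
+		src := []byte("hello, hello, hello, gophers")
+		enc := snappy.Encode(nil, src) // a nil dst is valid; a buffer is allocated
+		dec, err := snappy.Decode(nil, enc)
+		if err != nil {
+			panic(err) // e.g. ErrCorrupt for invalid input
+		}
+		fmt.Println(len(src), len(enc), string(dec) == string(src))
+	}
+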
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/decode.go b/Godeps/_workspace/src/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..e7f1259
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/decode.go
@@ -0,0 +1,294 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if len(dst) < dLen {
+ dst = make([]byte, dLen)
+ }
+
+ var d, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-1])
+ case x == 61:
+ s += 3
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-2]) | uint(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
+ }
+ length = int(x + 1)
+ if length <= 0 {
+ return nil, errors.New("snappy: unsupported literal length")
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return nil, ErrCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
+
+ case tagCopy2:
+ s += 3
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(src[s-2]) | int(src[s-1])<<8
+
+ case tagCopy4:
+ return nil, errors.New("snappy: unsupported COPY_4 tag")
+ }
+
+ end := d + length
+ if offset > d || end > len(dst) {
+ return nil, ErrCorrupt
+ }
+ for ; d < end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != dLen {
+ return nil, ErrCorrupt
+ }
+ return dst[:d], nil
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxUncompressedChunkLen),
+ buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4]) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if !r.readFull(r.decoded[:n]) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)]) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen]) {
+ return 0, r.err
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/encode.go b/Godeps/_workspace/src/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..f3b5484
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/encode.go
@@ -0,0 +1,254 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// We limit how far copy back-references can go, the same as the C++ code.
+const maxOffset = 1 << 15
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ case n < 1<<16:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ case n < 1<<24:
+ dst[0] = 62<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ dst[3] = uint8(n >> 16)
+ i = 4
+ case int64(n) < 1<<32:
+ dst[0] = 63<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ dst[3] = uint8(n >> 16)
+ dst[4] = uint8(n >> 24)
+ i = 5
+ default:
+ panic("snappy: source buffer is too long")
+ }
+ if copy(dst[i:], lit) != len(lit) {
+ panic("snappy: destination buffer is too short")
+ }
+ return i + len(lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ for length > 0 {
+ x := length - 4
+ if 0 <= x && x < 1<<3 && offset < 1<<11 {
+ dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ i += 2
+ break
+ }
+
+ x = length
+ if x > 1<<6 {
+ x = 1 << 6
+ }
+ dst[i+0] = uint8(x-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= x
+ }
+ return i
+}
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ // Return early if src is short.
+ if len(src) <= 4 {
+ if len(src) != 0 {
+ d += emitLiteral(dst[d:], src)
+ }
+ return dst[:d]
+ }
+
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ const maxTableSize = 1 << 14
+ shift, tableSize := uint(32-8), 1<<8
+ for tableSize < maxTableSize && tableSize < len(src) {
+ shift--
+ tableSize *= 2
+ }
+ var table [maxTableSize]int
+
+ // Iterate over the source bytes.
+ var (
+ s int // The iterator position.
+ t int // The last position with the same hash as s.
+ lit int // The start position of any pending literal bytes.
+ )
+ for s+3 < len(src) {
+ // Update the hash table.
+ b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
+ h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
+ p := &table[(h*0x1e35a7bd)>>shift]
+ // We need to store values in [-1, inf) in table. To save
+ // some initialization time, (re)use the table's zero value
+ // and shift the values against this zero: add 1 on writes,
+ // subtract 1 on reads.
+ t, *p = *p-1, s+1
+ // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
+ if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
+ s++
+ continue
+ }
+ // Otherwise, we have a match. First, emit any pending literal bytes.
+ if lit != s {
+ d += emitLiteral(dst[d:], src[lit:s])
+ }
+ // Extend the match to be as long as possible.
+ s0 := s
+ s, t = s+4, t+4
+ for s < len(src) && src[s] == src[t] {
+ s++
+ t++
+ }
+ // Emit the copied bytes.
+ d += emitCopy(dst[d:], s-t, s-s0)
+ lit = s
+ }
+
+ // Emit any final pending literal bytes and return.
+ if lit != len(src) {
+ d += emitLiteral(dst[d:], src[lit:])
+ }
+ return dst[:d]
+}
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+func MaxEncodedLen(srcLen int) int {
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
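+ // (For srcLen = 65536, the maximal framing chunk, the bound below
+ // works out to 32 + 65536 + 10922 = 76490 bytes.)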
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ return 32 + srcLen + srcLen/6
+}
+
+// NewWriter returns a new Writer that compresses to w, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+ enc []byte
+ buf [checksumSize + chunkHeaderSize]byte
+ wroteHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ w.wroteHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (n int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ if !w.wroteHeader {
+ copy(w.enc, magicChunk)
+ if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
+ w.err = err
+ return n, err
+ }
+ w.wroteHeader = true
+ }
+ for len(p) > 0 {
+ var uncompressed []byte
+ if len(p) > maxUncompressedChunkLen {
+ uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkBody := Encode(w.enc, uncompressed)
+ if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
+ }
+
+ chunkLen := 4 + len(chunkBody)
+ w.buf[0] = chunkType
+ w.buf[1] = uint8(chunkLen >> 0)
+ w.buf[2] = uint8(chunkLen >> 8)
+ w.buf[3] = uint8(chunkLen >> 16)
+ w.buf[4] = uint8(checksum >> 0)
+ w.buf[5] = uint8(checksum >> 8)
+ w.buf[6] = uint8(checksum >> 16)
+ w.buf[7] = uint8(checksum >> 24)
+ if _, err := w.w.Write(w.buf[:]); err != nil {
+ w.err = err
+ return n, err
+ }
+ if _, err := w.w.Write(chunkBody); err != nil {
+ w.err = err
+ return n, err
+ }
+ n += len(uncompressed)
+ }
+ return n, nil
+}
diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..e98653a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go
@@ -0,0 +1,68 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer supported.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
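+
+// As a worked example of the copy-tag layout above, a length-7 copy at
+// offset 300 encodes as a two-byte tagCopy1 chunk: the low 3 bits of m
+// hold length-4 = 3 and the high 3 bits hold offset>>8 = 1, so the first
+// byte is 1<<5 | 3<<2 | tagCopy1 = 0x2d and the second byte is
+// offset&0xff = 0x2c.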
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536 bytes".
+ maxUncompressedChunkLen = 65536
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
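+// The returned value is the spec's "masked" checksum: the raw CRC-32C is
+// rotated and a constant is added so that data which embeds its own
+// checksum is less likely to defeat the check.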
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE
new file mode 100644
index 0000000..d502227
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2015 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md
new file mode 100644
index 0000000..7aae45f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md
@@ -0,0 +1,19 @@
+# List
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
+- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
+- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- glyphicons [LICENSE](http://glyphicons.com/license/)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
new file mode 100644
index 0000000..8a04112
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md
@@ -0,0 +1,256 @@
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client/v2)
+
+## Description
+
+**NOTE:** The Go client library now has a "v2" version, with the old version
+being deprecated. The new version can be imported at
+`import "github.com/influxdb/influxdb/client/v2"`. It is not backwards-compatible.
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Cluster Admin Docs](http://influxdb.com/docs/v0.9/query_language/database_administration.html).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now for good measure, set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD:
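+
+```shell
+export INFLUX_USER=bubba
+export INFLUX_PWD=bumblebeetuna
+```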
+
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+ "net/url"
+ "time"
+
+ "github.com/influxdb/influxdb/client/v2"
+)
+
+const (
+ MyDB = "square_holes"
+ username = "bubba"
+ password = "bumblebeetuna"
+)
+
+func main() {
+ // Make client
+ u, _ := url.Parse("http://localhost:8086")
+ c := client.NewClient(client.Config{
+ URL: u,
+ Username: username,
+ Password: password,
+ })
+
+ // Create a new point batch
+ bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+ Database: MyDB,
+ Precision: "s",
+ })
+
+ // Create a point and add to batch
+ tags := map[string]string{"cpu": "cpu-total"}
+ fields := map[string]interface{}{
+ "idle": 10.1,
+ "system": 53.3,
+ "user": 46.6,
+ }
+ pt, _ := client.NewPoint("cpu_usage", tags, fields, time.Now())
+ bp.AddPoint(pt)
+
+ // Write the batch
+ c.Write(bp)
+}
+
+```
+
+### Inserting Data
+
+Time series data aka *points* are written to the database using batch inserts.
+The mechanism is to create one or more points and then create a batch aka
+*batch points* and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time and
+two field values, as well as three tags identifying the cpu, host and region. We
+write these points to a database called _systemstats_ using a measurement named
+_cpu_usage_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided, InfluxDB will use the database's _default_ retention policy.
+
+```go
+func writePoints(clnt client.Client) {
+ sampleSize := 1000
+ rand.Seed(42)
+
+ bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+ Database: "systemstats",
+ Precision: "us",
+ })
+
+ for i := 0; i < sampleSize; i++ {
+ regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+ tags := map[string]string{
+ "cpu": "cpu-total",
+ "host": fmt.Sprintf("host%d", rand.Intn(1000)),
+ "region": regions[rand.Intn(len(regions))],
+ }
+
+ idle := rand.Float64() * 100.0
+ fields := map[string]interface{}{
+ "idle": idle,
+ "busy": 100.0 - idle,
+ }
+
+ bp.AddPoint(client.NewPoint(
+ "cpu_usage",
+ tags,
+ fields,
+ time.Now(),
+ ))
+ }
+
+ err := clnt.Write(bp)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using
+familiar SQL constructs. In this example we create a convenience function to query
+the database as follows:
+
+```go
+// queryDB convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+ q := client.Query{
+ Command: cmd,
+ Database: MyDB,
+ }
+ if response, err := clnt.Query(q); err == nil {
+ if response.Error() != nil {
+ return res, response.Error()
+ }
+ res = response.Results
+ }
+ return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+ log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
+res, err = queryDB(clnt, q)
+if err != nil {
+ log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+ t, err := time.Parse(time.RFC3339, row[0].(string))
+ if err != nil {
+ log.Fatal(err)
+ }
+ val := row[1].(string)
+ log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+ // Make client
+ c := client.NewUDPClient("localhost:8089")
+
+ // Create a new point batch
+ bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+ Precision: "s",
+ })
+
+ // Create a point and add to batch
+ tags := map[string]string{"cpu": "cpu-total"}
+ fields := map[string]interface{}{
+ "idle": 10.1,
+ "system": 53.3,
+ "user": 46.6,
+ }
+ pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+ if err != nil {
+ panic(err.Error())
+ }
+ bp.AddPoint(pt)
+
+ // Write the batch
+ if err := c.Write(bp); err != nil {
+     panic(err.Error())
+ }
+}
+```
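+
+Note that the UDP client is write-only: its `Query` method always returns an
+error. A quick illustrative sketch (reusing the `c` from above):
+
+```go
+q := client.NewQuery("SELECT count(idle) FROM cpu_usage", "systemstats", "ns")
+if _, err := c.Query(q); err != nil {
+    fmt.Println(err) // "Querying via UDP is not supported"
+}
+```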
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdb/influxdb/client/v2](http://godoc.org/github.com/influxdb/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go).
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go
new file mode 100644
index 0000000..9e0d727
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go
@@ -0,0 +1,688 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdb/influxdb/models"
+)
+
+const (
+ // DefaultHost is the default host used to connect to an InfluxDB instance
+ DefaultHost = "localhost"
+
+ // DefaultPort is the default port used to connect to an InfluxDB instance
+ DefaultPort = 8086
+
+ // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
+ DefaultTimeout = 0
+)
+
+// Query is used to send a command to the server. Both Command and Database are required.
+type Query struct {
+ Command string
+ Database string
+}
+
+// ParseConnectionString will parse a string to create a valid connection URL
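+// For example (illustrative): ParseConnectionString("localhost", false) yields
+// http://localhost:8086, and ParseConnectionString("db.example.com:9999", true)
+// yields https://db.example.com:9999.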
+func ParseConnectionString(path string, ssl bool) (url.URL, error) {
+ var host string
+ var port int
+
+ h, p, err := net.SplitHostPort(path)
+ if err != nil {
+ if path == "" {
+ host = DefaultHost
+ } else {
+ host = path
+ }
+ // If they didn't specify a port, always use the default port
+ port = DefaultPort
+ } else {
+ host = h
+ port, err = strconv.Atoi(p)
+ if err != nil {
+ return url.URL{}, fmt.Errorf("invalid port number %q: %s", path, err)
+ }
+ }
+
+ u := url.URL{
+ Scheme: "http",
+ }
+ if ssl {
+ u.Scheme = "https"
+ }
+
+ u.Host = net.JoinHostPort(host, strconv.Itoa(port))
+
+ return u, nil
+}
+
+// Config is used to specify what server to connect to.
+// URL: The URL of the server to connect to.
+// Username/Password are optional. They will be passed via basic auth if provided.
+// UserAgent: If not provided, defaults to "InfluxDBClient".
+// Timeout: If not provided, defaults to 0 (no timeout).
+type Config struct {
+ URL url.URL
+ Username string
+ Password string
+ UserAgent string
+ Timeout time.Duration
+ Precision string
+}
+
+// NewConfig will create a config to be used in connecting to the client
+func NewConfig() Config {
+ return Config{
+ Timeout: DefaultTimeout,
+ }
+}
+
+// Client is used to make calls to the server.
+type Client struct {
+ url url.URL
+ username string
+ password string
+ httpClient *http.Client
+ userAgent string
+ precision string
+}
+
+const (
+ // ConsistencyOne requires at least one data node acknowledged a write.
+ ConsistencyOne = "one"
+
+ // ConsistencyAll requires all data nodes to acknowledge a write.
+ ConsistencyAll = "all"
+
+ // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
+ ConsistencyQuorum = "quorum"
+
+ // ConsistencyAny allows for hinted hand off, potentially no write happened yet.
+ ConsistencyAny = "any"
+)
+
+// NewClient will instantiate and return a connected client to issue commands to the server.
+func NewClient(c Config) (*Client, error) {
+ client := Client{
+ url: c.URL,
+ username: c.Username,
+ password: c.Password,
+ httpClient: &http.Client{Timeout: c.Timeout},
+ userAgent: c.UserAgent,
+ precision: c.Precision,
+ }
+ if client.userAgent == "" {
+ client.userAgent = "InfluxDBClient"
+ }
+ return &client, nil
+}
+
+// SetAuth will update the username and password
+func (c *Client) SetAuth(u, p string) {
+ c.username = u
+ c.password = p
+}
+
+// SetPrecision will update the precision
+func (c *Client) SetPrecision(precision string) {
+ c.precision = precision
+}
+
+// Query sends a command to the server and returns the Response
+func (c *Client) Query(q Query) (*Response, error) {
+ u := c.url
+
+ u.Path = "query"
+ values := u.Query()
+ values.Set("q", q.Command)
+ values.Set("db", q.Database)
+ if c.precision != "" {
+ values.Set("epoch", c.precision)
+ }
+ u.RawQuery = values.Encode()
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ dec := json.NewDecoder(resp.Body)
+ dec.UseNumber()
+ decErr := dec.Decode(&response)
+
+ // ignore this error if we got an invalid status code
+ if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
+ decErr = nil
+ }
+ // If we got a valid decode error, send that back
+ if decErr != nil {
+ return nil, decErr
+ }
+ // If we don't have an error in our json response, and didn't get StatusOK, then send back an error
+ if resp.StatusCode != http.StatusOK && response.Error() == nil {
+ return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+ }
+ return &response, nil
+}
+
+// Write takes BatchPoints and allows for writing of multiple points with defaults
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) Write(bp BatchPoints) (*Response, error) {
+ u := c.url
+ u.Path = "write"
+
+ var b bytes.Buffer
+ for _, p := range bp.Points {
+ if p.Raw != "" {
+ if _, err := b.WriteString(p.Raw); err != nil {
+ return nil, err
+ }
+ } else {
+ for k, v := range bp.Tags {
+ if p.Tags == nil {
+ p.Tags = make(map[string]string, len(bp.Tags))
+ }
+ p.Tags[k] = v
+ }
+
+ if _, err := b.WriteString(p.MarshalString()); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := b.WriteByte('\n'); err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequest("POST", u.String(), &b)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ precision := bp.Precision
+ if precision == "" {
+ precision = c.precision
+ }
+
+ params := req.URL.Query()
+ params.Set("db", bp.Database)
+ params.Set("rp", bp.RetentionPolicy)
+ params.Set("precision", precision)
+ params.Set("consistency", bp.WriteConsistency)
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+ err := errors.New(string(body))
+ response.Err = err
+ return &response, err
+ }
+
+ return nil, nil
+}
+
+// WriteLineProtocol takes a string of newline-delimited points to write
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
+ u := c.url
+ u.Path = "write"
+
+ r := strings.NewReader(data)
+
+ req, err := http.NewRequest("POST", u.String(), r)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+ params := req.URL.Query()
+ params.Set("db", database)
+ params.Set("rp", retentionPolicy)
+ params.Set("precision", precision)
+ params.Set("consistency", writeConsistency)
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+ err := errors.New(string(body))
+ response.Err = err
+ return &response, err
+ }
+
+ return nil, nil
+}
+
+// Ping will check to see if the server is up
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *Client) Ping() (time.Duration, string, error) {
+ now := time.Now()
+ u := c.url
+ u.Path = "ping"
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return 0, "", err
+ }
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return 0, "", err
+ }
+ defer resp.Body.Close()
+
+ version := resp.Header.Get("X-Influxdb-Version")
+ return time.Since(now), version, nil
+}
+
+// Structs
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+ Series []models.Row
+ Err error
+}
+
+// MarshalJSON encodes the result into JSON.
+func (r *Result) MarshalJSON() ([]byte, error) {
+ // Define a struct that outputs "error" as a string.
+ var o struct {
+ Series []models.Row `json:"series,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ // Copy fields to output struct.
+ o.Series = r.Series
+ if r.Err != nil {
+ o.Err = r.Err.Error()
+ }
+
+ return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Result struct
+func (r *Result) UnmarshalJSON(b []byte) error {
+ var o struct {
+ Series []models.Row `json:"series,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ err := dec.Decode(&o)
+ if err != nil {
+ return err
+ }
+ r.Series = o.Series
+ if o.Err != "" {
+ r.Err = errors.New(o.Err)
+ }
+ return nil
+}
+
+// Response represents a list of statement results.
+type Response struct {
+ Results []Result
+ Err error
+}
+
+// MarshalJSON encodes the response into JSON.
+func (r *Response) MarshalJSON() ([]byte, error) {
+ // Define a struct that outputs "error" as a string.
+ var o struct {
+ Results []Result `json:"results,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ // Copy fields to output struct.
+ o.Results = r.Results
+ if r.Err != nil {
+ o.Err = r.Err.Error()
+ }
+
+ return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Response struct
+func (r *Response) UnmarshalJSON(b []byte) error {
+ var o struct {
+ Results []Result `json:"results,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ err := dec.Decode(&o)
+ if err != nil {
+ return err
+ }
+ r.Results = o.Results
+ if o.Err != "" {
+ r.Err = errors.New(o.Err)
+ }
+ return nil
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r Response) Error() error {
+ if r.Err != nil {
+ return r.Err
+ }
+ for _, result := range r.Results {
+ if result.Err != nil {
+ return result.Err
+ }
+ }
+ return nil
+}
+
+// Point defines the fields that will be written to the database
+// Measurement, Time, and Fields are required
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type Point struct {
+ Measurement string
+ Tags map[string]string
+ Time time.Time
+ Fields map[string]interface{}
+ Precision string
+ Raw string
+}
+
+// MarshalJSON will format the time in RFC3339Nano
+// Precision is also ignored as it is only used for writing, not reading
+// In other words, we always send back nanosecond precision
+func (p *Point) MarshalJSON() ([]byte, error) {
+ point := struct {
+ Measurement string `json:"measurement,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Time string `json:"time,omitempty"`
+ Fields map[string]interface{} `json:"fields,omitempty"`
+ Precision string `json:"precision,omitempty"`
+ }{
+ Measurement: p.Measurement,
+ Tags: p.Tags,
+ Fields: p.Fields,
+ Precision: p.Precision,
+ }
+ // Let it omit empty if it's really zero
+ if !p.Time.IsZero() {
+ point.Time = p.Time.UTC().Format(time.RFC3339Nano)
+ }
+ return json.Marshal(&point)
+}
+
+// MarshalString renders string representation of a Point with specified
+// precision. The default precision is nanoseconds.
+func (p *Point) MarshalString() string {
+ pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
+ if err != nil {
+ return "# ERROR: " + err.Error() + " " + p.Measurement
+ }
+ if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
+ return pt.String()
+ }
+ return pt.PrecisionString(p.Precision)
+}
+
+// UnmarshalJSON decodes the data into the Point struct
+func (p *Point) UnmarshalJSON(b []byte) error {
+ var normal struct {
+ Measurement string `json:"measurement"`
+ Tags map[string]string `json:"tags"`
+ Time time.Time `json:"time"`
+ Precision string `json:"precision"`
+ Fields map[string]interface{} `json:"fields"`
+ }
+ var epoch struct {
+ Measurement string `json:"measurement"`
+ Tags map[string]string `json:"tags"`
+ Time *int64 `json:"time"`
+ Precision string `json:"precision"`
+ Fields map[string]interface{} `json:"fields"`
+ }
+
+ if err := func() error {
+ var err error
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ if err = dec.Decode(&epoch); err != nil {
+ return err
+ }
+ // Convert from epoch to time.Time, but only if Time
+ // was actually set.
+ var ts time.Time
+ if epoch.Time != nil {
+ ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+ if err != nil {
+ return err
+ }
+ }
+ p.Measurement = epoch.Measurement
+ p.Tags = epoch.Tags
+ p.Time = ts
+ p.Precision = epoch.Precision
+ p.Fields = normalizeFields(epoch.Fields)
+ return nil
+ }(); err == nil {
+ return nil
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ if err := dec.Decode(&normal); err != nil {
+ return err
+ }
+ normal.Time = SetPrecision(normal.Time, normal.Precision)
+ p.Measurement = normal.Measurement
+ p.Tags = normal.Tags
+ p.Time = normal.Time
+ p.Precision = normal.Precision
+ p.Fields = normalizeFields(normal.Fields)
+
+ return nil
+}
+
+// Remove any notion of json.Number
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+ newFields := map[string]interface{}{}
+
+ for k, v := range fields {
+ switch v := v.(type) {
+ case json.Number:
+ jv, e := v.Float64()
+ if e != nil {
+ panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+ }
+ newFields[k] = jv
+ default:
+ newFields[k] = v
+ }
+ }
+ return newFields
+}
+
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required
+// If no retention policy is specified, it will use the database's default retention policy.
+// If tags are specified, they will be "merged" with all points; a batch-level tag overwrites any tag of the same name already set on a point.
+// If time is specified, it will be applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type BatchPoints struct {
+ Points []Point `json:"points,omitempty"`
+ Database string `json:"database,omitempty"`
+ RetentionPolicy string `json:"retentionPolicy,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Time time.Time `json:"time,omitempty"`
+ Precision string `json:"precision,omitempty"`
+ WriteConsistency string `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+ var normal struct {
+ Points []Point `json:"points"`
+ Database string `json:"database"`
+ RetentionPolicy string `json:"retentionPolicy"`
+ Tags map[string]string `json:"tags"`
+ Time time.Time `json:"time"`
+ Precision string `json:"precision"`
+ }
+ var epoch struct {
+ Points []Point `json:"points"`
+ Database string `json:"database"`
+ RetentionPolicy string `json:"retentionPolicy"`
+ Tags map[string]string `json:"tags"`
+ Time *int64 `json:"time"`
+ Precision string `json:"precision"`
+ }
+
+ if err := func() error {
+ var err error
+ if err = json.Unmarshal(b, &epoch); err != nil {
+ return err
+ }
+ // Convert from epoch to time.Time
+ var ts time.Time
+ if epoch.Time != nil {
+ ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+ if err != nil {
+ return err
+ }
+ }
+ bp.Points = epoch.Points
+ bp.Database = epoch.Database
+ bp.RetentionPolicy = epoch.RetentionPolicy
+ bp.Tags = epoch.Tags
+ bp.Time = ts
+ bp.Precision = epoch.Precision
+ return nil
+ }(); err == nil {
+ return nil
+ }
+
+ if err := json.Unmarshal(b, &normal); err != nil {
+ return err
+ }
+ normal.Time = SetPrecision(normal.Time, normal.Precision)
+ bp.Points = normal.Points
+ bp.Database = normal.Database
+ bp.RetentionPolicy = normal.RetentionPolicy
+ bp.Tags = normal.Tags
+ bp.Time = normal.Time
+ bp.Precision = normal.Precision
+
+ return nil
+}
+
+// utility functions
+
+// Addr provides the current url as a string of the server the client is connected to.
+func (c *Client) Addr() string {
+ return c.url.String()
+}
+
+// helper functions
+
+// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
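+// For example (illustrative), EpochToTime(1, "s") yields the instant
+// 1970-01-01T00:00:01Z; an unrecognized precision yields an error.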
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+ if precision == "" {
+ precision = "s"
+ }
+ var t time.Time
+ switch precision {
+ case "h":
+ t = time.Unix(0, epoch*int64(time.Hour))
+ case "m":
+ t = time.Unix(0, epoch*int64(time.Minute))
+ case "s":
+ t = time.Unix(0, epoch*int64(time.Second))
+ case "ms":
+ t = time.Unix(0, epoch*int64(time.Millisecond))
+ case "u":
+ t = time.Unix(0, epoch*int64(time.Microsecond))
+ case "n":
+ t = time.Unix(0, epoch)
+ default:
+ return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
+ }
+ return t, nil
+}
+
+// SetPrecision will round a time to the specified precision
+func SetPrecision(t time.Time, precision string) time.Time {
+ switch precision {
+ case "n":
+ case "u":
+ return t.Round(time.Microsecond)
+ case "ms":
+ return t.Round(time.Millisecond)
+ case "s":
+ return t.Round(time.Second)
+ case "m":
+ return t.Round(time.Minute)
+ case "h":
+ return t.Round(time.Hour)
+ }
+ return t
+}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go
new file mode 100644
index 0000000..4dce0c2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go
@@ -0,0 +1,498 @@
+package client
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/influxdb/influxdb/models"
+)
+
+// UDPPayloadSize is a reasonable default payload size for UDP packets that
+// could be travelling over the internet.
+const (
+ UDPPayloadSize = 512
+)
+
+type HTTPConfig struct {
+ // Addr should be of the form "http://host:port"
+ // or "http://[ipv6-host%zone]:port".
+ Addr string
+
+ // Username is the influxdb username, optional
+ Username string
+
+ // Password is the influxdb password, optional
+ Password string
+
+ // UserAgent is the http User Agent, defaults to "InfluxDBClient"
+ UserAgent string
+
+ // Timeout for influxdb writes, defaults to no timeout
+ Timeout time.Duration
+
+ // InsecureSkipVerify gets passed to the http client, if true, it will
+ // skip https certificate verification. Defaults to false
+ InsecureSkipVerify bool
+}
+
+type UDPConfig struct {
+ // Addr should be of the form "host:port"
+ // or "[ipv6-host%zone]:port".
+ Addr string
+
+ // PayloadSize is the maximum size of a UDP client message, optional
+// Tune this based on your network. Defaults to UDPPayloadSize.
+ PayloadSize int
+}
+
+type BatchPointsConfig struct {
+ // Precision is the write precision of the points, defaults to "ns"
+ Precision string
+
+ // Database is the database to write points to
+ Database string
+
+ // RetentionPolicy is the retention policy of the points
+ RetentionPolicy string
+
+ // Write consistency is the number of servers required to confirm write
+ WriteConsistency string
+}
+
+// Client is a client interface for writing & querying the database
+type Client interface {
+ // Write takes a BatchPoints object and writes all Points to InfluxDB.
+ Write(bp BatchPoints) error
+
+ // Query makes an InfluxDB Query on the database. This will fail if using
+ // the UDP client.
+ Query(q Query) (*Response, error)
+
+ // Close releases any resources a Client may be using.
+ Close() error
+}
+
+// NewHTTPClient creates a client interface from the given config.
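+// A minimal illustrative configuration:
+//   c, err := NewHTTPClient(HTTPConfig{Addr: "http://localhost:8086"})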
+func NewHTTPClient(conf HTTPConfig) (Client, error) {
+ if conf.UserAgent == "" {
+ conf.UserAgent = "InfluxDBClient"
+ }
+
+ u, err := url.Parse(conf.Addr)
+ if err != nil {
+ return nil, err
+ } else if u.Scheme != "http" && u.Scheme != "https" {
+ m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
+ " must start with http:// or https://", u.Scheme)
+ return nil, errors.New(m)
+ }
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: conf.InsecureSkipVerify,
+ },
+ }
+ return &client{
+ url: u,
+ username: conf.Username,
+ password: conf.Password,
+ useragent: conf.UserAgent,
+ httpClient: &http.Client{
+ Timeout: conf.Timeout,
+ Transport: tr,
+ },
+ }, nil
+}
+
+// Close releases the client's resources.
+func (c *client) Close() error {
+ return nil
+}
+
+// NewUDPClient returns a client interface for writing to an InfluxDB UDP
+// service from the given config.
+func NewUDPClient(conf UDPConfig) (Client, error) {
+ var udpAddr *net.UDPAddr
+ udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
+ if err != nil {
+ return nil, err
+ }
+
+ conn, err := net.DialUDP("udp", nil, udpAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ payloadSize := conf.PayloadSize
+ if payloadSize == 0 {
+ payloadSize = UDPPayloadSize
+ }
+
+ return &udpclient{
+ conn: conn,
+ payloadSize: payloadSize,
+ }, nil
+}
+
+// Close releases the udpclient's resources.
+func (uc *udpclient) Close() error {
+ return uc.conn.Close()
+}
+
+type client struct {
+ url *url.URL
+ username string
+ password string
+ useragent string
+ httpClient *http.Client
+}
+
+type udpclient struct {
+ conn *net.UDPConn
+ payloadSize int
+}
+
+// BatchPoints is an interface into a batched grouping of points to write into
+// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
+// batch for each goroutine.
+type BatchPoints interface {
+ // AddPoint adds the given point to the Batch of points
+ AddPoint(p *Point)
+ // Points lists the points in the Batch
+ Points() []*Point
+
+ // Precision returns the currently set precision of this Batch
+ Precision() string
+ // SetPrecision sets the precision of this batch.
+ SetPrecision(s string) error
+
+ // Database returns the currently set database of this Batch
+ Database() string
+ // SetDatabase sets the database of this Batch
+ SetDatabase(s string)
+
+ // WriteConsistency returns the currently set write consistency of this Batch
+ WriteConsistency() string
+ // SetWriteConsistency sets the write consistency of this Batch
+ SetWriteConsistency(s string)
+
+ // RetentionPolicy returns the currently set retention policy of this Batch
+ RetentionPolicy() string
+ // SetRetentionPolicy sets the retention policy of this Batch
+ SetRetentionPolicy(s string)
+}
+
+// NewBatchPoints returns a BatchPoints interface based on the given config.
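+// For example (illustrative):
+//   bp, err := NewBatchPoints(BatchPointsConfig{Database: "mydb", Precision: "s"})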
+func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
+ if conf.Precision == "" {
+ conf.Precision = "ns"
+ }
+ if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
+ return nil, err
+ }
+ bp := &batchpoints{
+ database: conf.Database,
+ precision: conf.Precision,
+ retentionPolicy: conf.RetentionPolicy,
+ writeConsistency: conf.WriteConsistency,
+ }
+ return bp, nil
+}
+
+type batchpoints struct {
+ points []*Point
+ database string
+ precision string
+ retentionPolicy string
+ writeConsistency string
+}
+
+func (bp *batchpoints) AddPoint(p *Point) {
+ bp.points = append(bp.points, p)
+}
+
+func (bp *batchpoints) Points() []*Point {
+ return bp.points
+}
+
+func (bp *batchpoints) Precision() string {
+ return bp.precision
+}
+
+func (bp *batchpoints) Database() string {
+ return bp.database
+}
+
+func (bp *batchpoints) WriteConsistency() string {
+ return bp.writeConsistency
+}
+
+func (bp *batchpoints) RetentionPolicy() string {
+ return bp.retentionPolicy
+}
+
+func (bp *batchpoints) SetPrecision(p string) error {
+ if _, err := time.ParseDuration("1" + p); err != nil {
+ return err
+ }
+ bp.precision = p
+ return nil
+}
+
+func (bp *batchpoints) SetDatabase(db string) {
+ bp.database = db
+}
+
+func (bp *batchpoints) SetWriteConsistency(wc string) {
+ bp.writeConsistency = wc
+}
+
+func (bp *batchpoints) SetRetentionPolicy(rp string) {
+ bp.retentionPolicy = rp
+}
+
+type Point struct {
+ pt models.Point
+}
+
+// NewPoint returns a point with the given timestamp. If a timestamp is not
+// given, then data is sent to the database without a timestamp, in which case
+// the server will assign local time upon reception. NOTE: it is recommended
+// to send data without a timestamp.
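+// For example (illustrative):
+//   pt, err := NewPoint("cpu_usage",
+//       map[string]string{"cpu": "cpu-total"},
+//       map[string]interface{}{"idle": 10.1},
+//       time.Now())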
+func NewPoint(
+ name string,
+ tags map[string]string,
+ fields map[string]interface{},
+ t ...time.Time,
+) (*Point, error) {
+ var T time.Time
+ if len(t) > 0 {
+ T = t[0]
+ }
+
+ pt, err := models.NewPoint(name, tags, fields, T)
+ if err != nil {
+ return nil, err
+ }
+ return &Point{
+ pt: pt,
+ }, nil
+}
+
+// String returns a line-protocol string of the Point
+func (p *Point) String() string {
+ return p.pt.String()
+}
+
+// PrecisionString returns a line-protocol string of the Point, at precision
+func (p *Point) PrecisionString(precision string) string {
+ return p.pt.PrecisionString(precision)
+}
+
+// Name returns the measurement name of the point
+func (p *Point) Name() string {
+ return p.pt.Name()
+}
+
+// Tags returns the tags associated with the point
+func (p *Point) Tags() map[string]string {
+ return p.pt.Tags()
+}
+
+// Time returns the timestamp for the point
+func (p *Point) Time() time.Time {
+ return p.pt.Time()
+}
+
+// UnixNano returns the unix nano time of the point
+func (p *Point) UnixNano() int64 {
+ return p.pt.UnixNano()
+}
+
+// Fields returns the fields for the point
+func (p *Point) Fields() map[string]interface{} {
+ return p.pt.Fields()
+}
+
+func (uc *udpclient) Write(bp BatchPoints) error {
+ var b bytes.Buffer
+ var d time.Duration
+ d, _ = time.ParseDuration("1" + bp.Precision())
+
+ for _, p := range bp.Points() {
+ pointstring := p.pt.RoundedString(d) + "\n"
+
+ // Write and reset the buffer if we reach the max size
+ if b.Len()+len(pointstring) >= uc.payloadSize {
+ if _, err := uc.conn.Write(b.Bytes()); err != nil {
+ return err
+ }
+ b.Reset()
+ }
+
+ if _, err := b.WriteString(pointstring); err != nil {
+ return err
+ }
+ }
+
+ _, err := uc.conn.Write(b.Bytes())
+ return err
+}
+
+func (c *client) Write(bp BatchPoints) error {
+ var b bytes.Buffer
+
+ for _, p := range bp.Points() {
+ if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
+ return err
+ }
+
+ if err := b.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ u := c.url
+ u.Path = "write"
+ req, err := http.NewRequest("POST", u.String(), &b)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.useragent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ params := req.URL.Query()
+ params.Set("db", bp.Database())
+ params.Set("rp", bp.RetentionPolicy())
+ params.Set("precision", bp.Precision())
+ params.Set("consistency", bp.WriteConsistency())
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+ return errors.New(string(body))
+ }
+
+ return nil
+}
+
+// Query defines a query to send to the server
+type Query struct {
+ Command string
+ Database string
+ Precision string
+}
+
+// NewQuery returns a query object
+// database and precision strings can be empty strings if they are not needed
+// for the query.
+func NewQuery(command, database, precision string) Query {
+ return Query{
+ Command: command,
+ Database: database,
+ Precision: precision,
+ }
+}
+
+// Response represents a list of statement results.
+type Response struct {
+ Results []Result
+ Err error
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+ if r.Err != nil {
+ return r.Err
+ }
+ for _, result := range r.Results {
+ if result.Err != nil {
+ return result.Err
+ }
+ }
+ return nil
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+ Series []models.Row
+ Err error
+}
+
+func (uc *udpclient) Query(q Query) (*Response, error) {
+ return nil, fmt.Errorf("Querying via UDP is not supported")
+}
+
+// Query sends a command to the server and returns the Response
+func (c *client) Query(q Query) (*Response, error) {
+ u := c.url
+ u.Path = "query"
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.useragent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ params := req.URL.Query()
+ params.Set("q", q.Command)
+ params.Set("db", q.Database)
+ if q.Precision != "" {
+ params.Set("epoch", q.Precision)
+ }
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ dec := json.NewDecoder(resp.Body)
+ dec.UseNumber()
+ decErr := dec.Decode(&response)
+
+ // ignore this error if we got an invalid status code
+ if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
+ decErr = nil
+ }
+ // If we got a valid decode error, send that back
+ if decErr != nil {
+ return nil, decErr
+ }
+ // If we don't have an error in our json response, and didn't get statusOK
+ // then send back an error
+ if resp.StatusCode != http.StatusOK && response.Error() == nil {
+ return &response, fmt.Errorf("received status code %d from server",
+ resp.StatusCode)
+ }
+ return &response, nil
+}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/models/points.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/models/points.go
new file mode 100644
index 0000000..6cdaba3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/models/points.go
@@ -0,0 +1,1385 @@
+package models
+
+import (
+ "bytes"
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdb/influxdb/pkg/escape"
+)
+
+var (
+ measurementEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ }
+
+ tagEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ '=': []byte(`\=`),
+ }
+)
+
+// Point defines the values that will be written to the database
+type Point interface {
+ Name() string
+ SetName(string)
+
+ Tags() Tags
+ AddTag(key, value string)
+ SetTags(tags Tags)
+
+ Fields() Fields
+ AddField(name string, value interface{})
+
+ Time() time.Time
+ SetTime(t time.Time)
+ UnixNano() int64
+
+ HashID() uint64
+ Key() []byte
+
+ Data() []byte
+ SetData(buf []byte)
+
+ // String returns a string representation of the point object, if there is a
+ // timestamp associated with the point then it will be specified with the default
+ // precision of nanoseconds
+ String() string
+
+ // PrecisionString returns a string representation of the point object, if there
+ // is a timestamp associated with the point then it will be specified in the
+ // given unit
+ PrecisionString(precision string) string
+
+ // RoundedString returns a string representation of the point object, if there
+ // is a timestamp associated with the point, then it will be rounded to the
+ // given duration
+ RoundedString(d time.Duration) string
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+func (a Points) Len() int { return len(a) }
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+ time time.Time
+
+ // text encoding of measurement and tags
+ // key must always be stored sorted by tags; if the original line was not
+ // sorted, we need to re-sort it
+ key []byte
+
+ // text encoding of field data
+ fields []byte
+
+ // text encoding of timestamp
+ ts []byte
+
+ // binary encoded field data
+ data []byte
+
+ // cached version of parsed fields from data
+ cachedFields map[string]interface{}
+
+ // cached version of parsed name from key
+ cachedName string
+}
+
+const (
+ // the number of characters for the largest possible int64 (9223372036854775807)
+ maxInt64Digits = 19
+
+ // the number of characters for the smallest possible int64 (-9223372036854775808)
+ minInt64Digits = 20
+
+ // the number of characters required for the largest float64 before a range check
+ // would occur during parsing
+ maxFloat64Digits = 25
+
+ // the number of characters required for the smallest float64 before a range
+ // check would occur during parsing
+ minFloat64Digits = 27
+)
+
+func ParsePointsString(buf string) ([]Point, error) {
+ return ParsePoints([]byte(buf))
+}
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines. If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
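+// For example (illustrative), the line
+//   cpu,host=serverA,region=us-west idle=0.64 1434055562000000000
+// parses to measurement "cpu", tags host and region, field idle, and a
+// nanosecond timestamp.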
+func ParsePoints(buf []byte) ([]Point, error) {
+ return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+ points := []Point{}
+ var (
+ pos int
+ block []byte
+ failed []string
+ )
+ for {
+ pos, block = scanLine(buf, pos)
+ pos += 1
+
+ if len(block) == 0 {
+ break
+ }
+
+ // lines which start with '#' are comments
+ start := skipWhitespace(block, 0)
+
+ // If line is all whitespace, just skip it
+ if start >= len(block) {
+ continue
+ }
+
+ if block[start] == '#' {
+ continue
+ }
+
+ // strip the newline if one is present
+ if block[len(block)-1] == '\n' {
+ block = block[:len(block)-1]
+ }
+
+ pt, err := parsePoint(block[start:], defaultTime, precision)
+ if err != nil {
+     failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
+ } else {
+ points = append(points, pt)
+ }
+
+ if pos >= len(buf) {
+ break
+ }
+
+ }
+ if len(failed) > 0 {
+ return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+ }
+ return points, nil
+
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+ // scan the first block which is measurement[,tag1=value1,tag2=value=2...]
+ pos, key, err := scanKey(buf, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // measurement name is required
+ if len(key) == 0 {
+ return nil, fmt.Errorf("missing measurement")
+ }
+
+ // scan the second block is which is field1=value1[,field2=value2,...]
+ pos, fields, err := scanFields(buf, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ // at least one field is required
+ if len(fields) == 0 {
+ return nil, fmt.Errorf("missing fields")
+ }
+
+ // scan the last block which is an optional integer timestamp
+ pos, ts, err := scanTime(buf, pos)
+
+ if err != nil {
+ return nil, err
+ }
+
+ pt := &point{
+ key: key,
+ fields: fields,
+ ts: ts,
+ }
+
+ if len(ts) == 0 {
+ pt.time = defaultTime
+ pt.SetPrecision(precision)
+ } else {
+ ts, err := strconv.ParseInt(string(ts), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ pt.time = time.Unix(0, ts*pt.GetPrecisionMultiplier(precision)).UTC()
+ }
+ return pt, nil
+}
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf. If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+
+ i = start
+
+ // Determines whether the tags are sorted; assume they are
+ sorted := true
+
+ // indices holds the indexes within buf of the start of each tag. For example,
+ // a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20],
+ // which indicates that the first tag starts at buf[4], the second at buf[11], and
+ // the last at buf[20]
+ indices := make([]int, 100)
+
+ // tracks how many commas we've seen so we know how many entries in indices
+ // are in use. Since indices is an arbitrarily large slice, we need to know
+ // how many of its values are meaningful.
+ commas := 0
+
+ // tracks whether we've seen an '='
+ equals := 0
+
+ // loop over each byte in buf
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ if equals == 0 && commas > 0 {
+ return i, buf[start:i], fmt.Errorf("missing tag value")
+ }
+
+ break
+ }
+
+ // equals is special in the tags section. It must be escaped if part of a tag key or value.
+ // It does not need to be escaped if part of the measurement.
+ if buf[i] == '=' && commas > 0 {
+ if i-1 < 0 || i-2 < 0 {
+ return i, buf[start:i], fmt.Errorf("missing tag key")
+ }
+
+ // Check for "cpu,=value" but allow "cpu,a\,=value"
+ if buf[i-1] == ',' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing tag key")
+ }
+
+ // Check for "cpu,\ =value"
+ if buf[i-1] == ' ' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing tag key")
+ }
+
+ i += 1
+ equals += 1
+
+ // Check for "cpu,a=1,b= value=1" or "cpu,a=1,b=,c=foo value=1"
+ if i < len(buf) && (buf[i] == ' ' || buf[i] == ',') {
+ return i, buf[start:i], fmt.Errorf("missing tag value")
+ }
+ continue
+ }
+
+ // escaped character
+ if buf[i] == '\\' {
+ i += 2
+ continue
+ }
+
+ // At a tag separator (comma), track its location
+ if buf[i] == ',' {
+ if equals == 0 && commas > 0 {
+ return i, buf[start:i], fmt.Errorf("missing tag value")
+ }
+ i += 1
+
+ // grow our indices slice if we have too many tags
+ if commas >= len(indices) {
+ newIndices := make([]int, cap(indices)*2)
+ copy(newIndices, indices)
+ indices = newIndices
+ }
+ indices[commas] = i
+ commas += 1
+
+ // Check for "cpu, value=1"
+ if i < len(buf) && buf[i] == ' ' {
+ return i, buf[start:i], fmt.Errorf("missing tag key")
+ }
+ continue
+ }
+
+ // reached end of the block? (next block would be fields)
+ if buf[i] == ' ' {
+ // check for "cpu,tag value=1"
+ if equals == 0 && commas > 0 {
+ return i, buf[start:i], fmt.Errorf("missing tag value")
+ }
+ if equals > 0 && commas-1 != equals-1 {
+ return i, buf[start:i], fmt.Errorf("missing tag value")
+ }
+
+ // grow our indices slice if we have too many tags
+ if commas >= len(indices) {
+ newIndices := make([]int, cap(indices)*2)
+ copy(newIndices, indices)
+ indices = newIndices
+ }
+
+ indices[commas] = i + 1
+ break
+ }
+
+ i += 1
+ }
+
+ // check that all field sections had key and values (e.g. prevent "a=1,b")
+ // We're using commas-1 because there should always be a comma after the measurement
+ if equals > 0 && commas-1 != equals-1 {
+ return i, buf[start:i], fmt.Errorf("invalid tag format")
+ }
+
+ // This check makes sure we actually received fields from the user. #3379
+ // This will catch invalid syntax such as: `cpu,host=serverA,region=us-west`
+ if i >= len(buf) {
+ return i, buf[start:i], fmt.Errorf("missing fields")
+ }
+
+ // Now we know where the key region is within buf, and the locations of tags, we
+ // need to determine if duplicate tags exist and if the tags are sorted. This iterates
+ // 1/2 of the list comparing each end with each other, walking towards the center from
+ // both sides.
+ for j := 0; j < commas/2; j++ {
+ // get the left and right tags
+ _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+ _, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=')
+
+ // If the tags are equal, then there are duplicate tags, and we should abort
+ if bytes.Equal(left, right) {
+ return i, buf[start:i], fmt.Errorf("duplicate tags")
+ }
+
+ // If left is greater than right, the tags are not sorted. We must continue
+ // since there could still be duplicate tags.
+ if bytes.Compare(left, right) > 0 {
+ sorted = false
+ }
+ }
+
+ // If the tags are not sorted, then sort them. This sort is inline and
+ // uses the tag indices we created earlier. The actual buffer is not sorted, the
+ // indices are using the buffer for value comparison. After the indices are sorted,
+ // the buffer is reconstructed from the sorted indices.
+ if !sorted && commas > 0 {
+ // Get the measurement name for later
+ measurement := buf[start : indices[0]-1]
+
+ // Sort the indices
+ indices := indices[:commas]
+ insertionSort(0, commas, buf, indices)
+
+ // Create a new key using the measurement and sorted indices
+ b := make([]byte, len(buf[start:i]))
+ pos := copy(b, measurement)
+ for _, i := range indices {
+ b[pos] = ','
+ pos += 1
+ _, v := scanToSpaceOr(buf, i, ',')
+ pos += copy(b[pos:], v)
+ }
+
+ return i, b, nil
+ }
+
+ return i, buf[start:i], nil
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+ for i := l + 1; i < r; i++ {
+ for j := i; j > l && less(buf, indices, j, j-1); j-- {
+ indices[j], indices[j-1] = indices[j-1], indices[j]
+ }
+ }
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+ // This grabs the tag names for i & j, it ignores the values
+ _, a := scanTo(buf, indices[i], '=')
+ _, b := scanTo(buf, indices[j], '=')
+ return bytes.Compare(a, b) < 0
+}
+
+func isFieldEscapeChar(b byte) bool {
+ for c := range escape.Codes {
+ if c == b {
+ return true
+ }
+ }
+ return false
+}
+
+// scanFields scans buf, starting at i for the fields section of a point. It returns
+// the ending position and the byte slice of the fields within buf
+func scanFields(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+ quoted := false
+
+ // tracks how many '=' we've seen
+ equals := 0
+
+ // tracks how many commas we've seen
+ commas := 0
+
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // escaped characters?
+ if buf[i] == '\\' && i+1 < len(buf) {
+
+ // Is this an escape char within a string field? Only " and \ are allowed.
+ if quoted && (buf[i+1] == '"' || buf[i+1] == '\\') {
+ i += 2
+ continue
+ // Non-string field escaped chars
+ } else if !quoted && isFieldEscapeChar(buf[i+1]) {
+ i += 2
+ continue
+ }
+ }
+
+ // If the value is quoted, scan until we get to the end quote
+ if buf[i] == '"' {
+ quoted = !quoted
+ i += 1
+ continue
+ }
+
+ // If we see an =, ensure that there is at least one char before and after it
+ if buf[i] == '=' && !quoted {
+ equals += 1
+
+ // check for "... =123" but allow "a\ =123"
+ if buf[i-1] == ' ' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "...a=123,=456" but allow "a=123,a\,=456"
+ if buf[i-1] == ',' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "... value="
+ if i+1 >= len(buf) {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ // check for "... value=,value2=..."
+ if buf[i+1] == ',' || buf[i+1] == ' ' {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+ var err error
+ i, err = scanNumber(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ // If next byte is not a double-quote, the value must be a boolean
+ if buf[i+1] != '"' {
+ var err error
+ i, _, err = scanBoolean(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ }
+
+ if buf[i] == ',' && !quoted {
+ commas += 1
+ }
+
+ // reached end of block?
+ if buf[i] == ' ' && !quoted {
+ break
+ }
+ i += 1
+ }
+
+ if quoted {
+ return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+ }
+
+ // check that all field sections had key and values (e.g. prevent "a=1,b")
+ if equals == 0 || commas != equals-1 {
+ return i, buf[start:i], fmt.Errorf("invalid field format")
+ }
+
+ return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i for the time section of a point. It returns
+// the ending position and the byte slice of the fields within buf and error if the
+// timestamp is not in the correct numeric format
+func scanTime(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // Timestamps should be integers; verify that here so we don't need to
+ // actually parse the timestamp until it is needed
+ if buf[i] < '0' || buf[i] > '9' {
+ // Handle negative timestamps
+ if i == start && buf[i] == '-' {
+ i += 1
+ continue
+ }
+ return i, buf[start:i], fmt.Errorf("bad timestamp")
+ }
+
+ // reached end of block?
+ if buf[i] == '\n' {
+ break
+ }
+ i += 1
+ }
+ return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+ return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float. It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+ start := i
+ var isInt bool
+
+ // Is negative number?
+ if i < len(buf) && buf[i] == '-' {
+ i += 1
+ // There must be more characters now, as just '-' is illegal.
+ if i == len(buf) {
+ return i, fmt.Errorf("invalid number")
+ }
+ }
+
+ // how many decimal points we've seen
+ decimals := 0
+
+ // indicates the number is float in scientific notation
+ scientific := false
+
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+
+ if buf[i] == 'i' && i > start && !isInt {
+ isInt = true
+ i += 1
+ continue
+ }
+
+ if buf[i] == '.' {
+ decimals += 1
+ }
+
+ // Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+ if decimals > 1 {
+ return i, fmt.Errorf("invalid number")
+ }
+
+ // `e` is valid for floats but not as the first char
+ if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+ scientific = true
+ i += 1
+ continue
+ }
+
+ // + and - are only valid at this point if they follow an e (scientific notation)
+ if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+ i += 1
+ continue
+ }
+
+ // NaN is an unsupported value
+ if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+ return i, fmt.Errorf("invalid number")
+ }
+
+ if !isNumeric(buf[i]) {
+ return i, fmt.Errorf("invalid number")
+ }
+ i += 1
+ }
+ if isInt && (decimals > 0 || scientific) {
+ return i, fmt.Errorf("invalid number")
+ }
+
+ // It's more common that numbers will be within min/max range for their type but we need to prevent
+ // out of range numbers from being parsed successfully. This uses some simple heuristics to decide
+ // if we should parse the number to the actual type. It does not do it all the time because it incurs
+ // extra allocations and we end up converting the type again when writing points to disk.
+ if isInt {
+ // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+ if buf[i-1] != 'i' {
+ return i, fmt.Errorf("invalid number")
+ }
+ // Parse the int to check bounds if the number of digits could be larger than the max range
+ // We subtract 1 from the index to remove the `i` from our tests
+ if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+ if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil {
+ return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+ }
+ }
+ } else {
+ // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+ if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+ if _, err := strconv.ParseFloat(string(buf[start:i]), 64); err != nil {
+ return i, fmt.Errorf("invalid float")
+ }
+ }
+ }
+
+ return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid
+// boolean is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+ start := i
+
+ if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ i += 1
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+ i += 1
+ }
+
+ // Single char bool (t, T, f, F) is ok
+ if i-start == 1 {
+ return i, buf[start:i], nil
+ }
+
+ // length must be 4 for true or TRUE
+ if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // length must be 5 for false or FALSE
+ if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // Otherwise
+ valid := false
+ switch buf[start] {
+ case 't':
+ valid = bytes.Equal(buf[start:i], []byte("true"))
+ case 'f':
+ valid = bytes.Equal(buf[start:i], []byte("false"))
+ case 'T':
+ valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+ case 'F':
+ valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+ }
+
+ if !valid {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i, after
+// scanning over spaces, tabs, and NUL bytes
+func skipWhitespace(buf []byte, i int) int {
+ for i < len(buf) {
+ if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+ break
+ }
+ i++
+ }
+ return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ fields := false
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ' ' {
+ fields = true
+ }
+
+ // If we see a double quote, make sure it is not escaped
+ if fields && buf[i] == '"' && (i-1 > 0 && buf[i-1] != '\\') {
+ i += 1
+ quoted = !quoted
+ continue
+ }
+
+ if buf[i] == '\n' && !quoted {
+ break
+ }
+
+ i += 1
+ }
+
+ return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with stop byte. If there are leading
+// spaces or escaped chars, they are skipped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == '\\' {
+ i += 2
+ continue
+ }
+
+ // reached end of block?
+ if buf[i] == stop {
+ break
+ }
+ i += 1
+ }
+
+ return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with the stop byte or a space. If there
+// are leading spaces, they are skipped.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == '\\' {
+ i += 2
+ continue
+ }
+ // reached end of block?
+ if buf[i] == stop || buf[i] == ' ' {
+ break
+ }
+ i += 1
+ }
+
+ return i, buf[start:i]
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+ start := i
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == '\\' {
+ i += 2
+ continue
+ }
+
+ if buf[i] == ',' {
+ break
+ }
+ i += 1
+ }
+ return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ // Only escape char for a field value is a double-quote
+ if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' {
+ i += 2
+ continue
+ }
+
+ // Quoted value? (e.g. string)
+ if buf[i] == '"' {
+ i += 1
+ quoted = !quoted
+ continue
+ }
+
+ if buf[i] == ',' && !quoted {
+ break
+ }
+ i += 1
+ }
+ return i, buf[start:i]
+}
+
+func escapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ return in
+}
+
+func escapeTag(in []byte) []byte {
+ for b, esc := range tagEscapeCodes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
+
+func unescapeTag(in []byte) []byte {
+ for b, esc := range tagEscapeCodes {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ return in
+}
+
+// escapeStringField returns a copy of in with any double quotes or
+// backslashes escaped
+func escapeStringField(in string) string {
+ var out []byte
+ i := 0
+ for {
+ if i >= len(in) {
+ break
+ }
+ // escape backslashes
+ if in[i] == '\\' {
+ out = append(out, '\\')
+ out = append(out, '\\')
+ i += 1
+ continue
+ }
+ // escape double-quotes
+ if in[i] == '"' {
+ out = append(out, '\\')
+ out = append(out, '"')
+ i += 1
+ continue
+ }
+ out = append(out, in[i])
+ i += 1
+
+ }
+ return string(out)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped
+func unescapeStringField(in string) string {
+ var out []byte
+ i := 0
+ for {
+ if i >= len(in) {
+ break
+ }
+ // unescape backslashes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+ out = append(out, '\\')
+ i += 2
+ continue
+ }
+ // unescape double-quotes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+ out = append(out, '"')
+ i += 2
+ continue
+ }
+ out = append(out, in[i])
+ i += 1
+
+ }
+ return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) is passed, this function returns an error.
+func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) {
+ for key, value := range fields {
+ if fv, ok := value.(float64); ok {
+ // Ensure the caller validates and handles invalid field values
+ if math.IsNaN(fv) {
+ return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+ }
+ }
+ }
+
+ return &point{
+ key: MakeKey([]byte(name), tags),
+ time: time,
+ fields: fields.MarshalBinary(),
+ }, nil
+}
+
+// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) is passed, this function panics.
+func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+ pt, err := NewPoint(name, tags, fields, time)
+ if err != nil {
+ panic(err.Error())
+ }
+ return pt
+}
+
+func (p *point) Data() []byte {
+ return p.data
+}
+
+func (p *point) SetData(b []byte) {
+ p.data = b
+}
+
+func (p *point) Key() []byte {
+ return p.key
+}
+
+func (p *point) name() []byte {
+ _, name := scanTo(p.key, 0, ',')
+ return name
+}
+
+// Name returns the measurement name for the point
+func (p *point) Name() string {
+ if p.cachedName != "" {
+ return p.cachedName
+ }
+ p.cachedName = string(escape.Unescape(p.name()))
+ return p.cachedName
+}
+
+// SetName updates the measurement name for the point
+func (p *point) SetName(name string) {
+ p.cachedName = ""
+ p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time returns the timestamp for the point
+func (p *point) Time() time.Time {
+ return p.time
+}
+
+// SetTime updates the timestamp for the point
+func (p *point) SetTime(t time.Time) {
+ p.time = t
+}
+
+// Tags returns the tag set for the point
+func (p *point) Tags() Tags {
+ tags := map[string]string{}
+
+ if len(p.key) != 0 {
+ pos, name := scanTo(p.key, 0, ',')
+
+ // it's an empty key, so there are no tags
+ if len(name) == 0 {
+ return tags
+ }
+
+ i := pos + 1
+ var key, value []byte
+ for {
+ if i >= len(p.key) {
+ break
+ }
+ i, key = scanTo(p.key, i, '=')
+ i, value = scanTagValue(p.key, i+1)
+
+ if len(value) == 0 {
+ continue
+ }
+
+ tags[string(unescapeTag(key))] = string(unescapeTag(value))
+
+ i += 1
+ }
+ }
+ return tags
+}
+
+func MakeKey(name []byte, tags Tags) []byte {
+ // unescape the name and then re-escape it to avoid double escaping.
+ // The key should always be stored in escaped form.
+ return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
+
+// SetTags replaces the tags for the point
+func (p *point) SetTags(tags Tags) {
+ p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// AddTag adds or replaces a tag value for a point
+func (p *point) AddTag(key, value string) {
+ tags := p.Tags()
+ tags[key] = value
+ p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// Fields returns the fields for the point
+func (p *point) Fields() Fields {
+ if p.cachedFields != nil {
+ return p.cachedFields
+ }
+ p.cachedFields = p.unmarshalBinary()
+ return p.cachedFields
+}
+
+// AddField adds or replaces a field value for a point
+func (p *point) AddField(name string, value interface{}) {
+ fields := p.Fields()
+ fields[name] = value
+ p.fields = fields.MarshalBinary()
+ p.cachedFields = nil
+}
+
+// SetPrecision truncates the point's timestamp to the specified precision
+func (p *point) SetPrecision(precision string) {
+ switch precision {
+ case "n": // nanoseconds: already at full precision, nothing to truncate
+ case "u":
+ p.SetTime(p.Time().Truncate(time.Microsecond))
+ case "ms":
+ p.SetTime(p.Time().Truncate(time.Millisecond))
+ case "s":
+ p.SetTime(p.Time().Truncate(time.Second))
+ case "m":
+ p.SetTime(p.Time().Truncate(time.Minute))
+ case "h":
+ p.SetTime(p.Time().Truncate(time.Hour))
+ }
+}
+
+// GetPrecisionMultiplier returns the number of nanoseconds in one unit of the
+// specified precision, e.g. 1e6 for "ms"; dividing a UnixNano timestamp by
+// this value expresses it at that precision
+func (p *point) GetPrecisionMultiplier(precision string) int64 {
+ d := time.Nanosecond
+ switch precision {
+ case "u":
+ d = time.Microsecond
+ case "ms":
+ d = time.Millisecond
+ case "s":
+ d = time.Second
+ case "m":
+ d = time.Minute
+ case "h":
+ d = time.Hour
+ }
+ return int64(d)
+}
+
+func (p *point) String() string {
+ if p.Time().IsZero() {
+ return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+ }
+ return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), p.UnixNano())
+}
+
+// PrecisionString returns the string representation of the point with its
+// timestamp converted to the given precision
+func (p *point) PrecisionString(precision string) string {
+ if p.Time().IsZero() {
+ return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+ }
+ return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+ p.UnixNano()/p.GetPrecisionMultiplier(precision))
+}
+
+// RoundedString returns the string representation of the point with its
+// timestamp rounded to the given duration
+func (p *point) RoundedString(d time.Duration) string {
+ if p.Time().IsZero() {
+ return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+ }
+ return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+ p.time.Round(d).UnixNano())
+}
+
+func (p *point) unmarshalBinary() Fields {
+ return newFieldsFromBinary(p.fields)
+}
+
+func (p *point) HashID() uint64 {
+ h := fnv.New64a()
+ h.Write(p.key)
+ sum := h.Sum64()
+ return sum
+}
+
+func (p *point) UnixNano() int64 {
+ return p.Time().UnixNano()
+}
+
+// Tags represents a mapping between a point's tag names and their values
+type Tags map[string]string
+
+func (t Tags) HashKey() []byte {
+ // Empty maps marshal to empty bytes.
+ if len(t) == 0 {
+ return nil
+ }
+
+ escaped := Tags{}
+ for k, v := range t {
+ ek := escapeTag([]byte(k))
+ ev := escapeTag([]byte(v))
+
+ if len(ev) > 0 {
+ escaped[string(ek)] = string(ev)
+ }
+ }
+
+ // Extract keys and determine final size.
+ sz := len(escaped) + (len(escaped) * 2) // separators
+ keys := make([]string, len(escaped)+1)
+ i := 0
+ for k, v := range escaped {
+ keys[i] = k
+ i += 1
+ sz += len(k) + len(v)
+ }
+ keys = keys[:i]
+ sort.Strings(keys)
+ // Generate marshaled bytes.
+ b := make([]byte, sz)
+ buf := b
+ idx := 0
+ for _, k := range keys {
+ buf[idx] = ','
+ idx += 1
+ copy(buf[idx:idx+len(k)], k)
+ idx += len(k)
+ buf[idx] = '='
+ idx += 1
+ v := escaped[k]
+ copy(buf[idx:idx+len(v)], v)
+ idx += len(v)
+ }
+ return b[:idx]
+}
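+
+// For illustration: Tags{"region": "us-east", "host": "a"}.HashKey() yields
+// ",host=a,region=us-east": keys are sorted, each pair is prefixed with a
+// comma, so the result can be appended directly to an escaped measurement name.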
+
+// Fields represents a mapping between a point's field names and their values
+type Fields map[string]interface{}
+
+func parseNumber(val []byte) (interface{}, error) {
+ if val[len(val)-1] == 'i' {
+ val = val[:len(val)-1]
+ return strconv.ParseInt(string(val), 10, 64)
+ }
+ for i := 0; i < len(val); i++ {
+ // If there is a decimal point, an exponent, or a NaN/Inf marker, parse as float
+ if val[i] == '.' || val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' {
+ return strconv.ParseFloat(string(val), 64)
+ }
+ // A non-numeric character means the value is not a number at all
+ if val[i] < '0' || val[i] > '9' {
+ return string(val), nil
+ }
+ }
+ return strconv.ParseFloat(string(val), 64)
+}
+
+func newFieldsFromBinary(buf []byte) Fields {
+ fields := Fields{}
+ var (
+ i int
+ name, valueBuf []byte
+ value interface{}
+ err error
+ )
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ i, name = scanTo(buf, i, '=')
+ if len(name) == 0 {
+ continue
+ }
+ name = escape.Unescape(name)
+
+ i, valueBuf = scanFieldValue(buf, i+1)
+ if len(valueBuf) == 0 {
+ fields[string(name)] = nil
+ continue
+ }
+
+ // If the first char is a double-quote, then unmarshal as string
+ if valueBuf[0] == '"' {
+ value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1]))
+ // Check for numeric characters and special NaN or Inf
+ } else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '+' || valueBuf[0] == '.' ||
+ valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN
+ valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf
+
+ value, err = parseNumber(valueBuf)
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err))
+ }
+
+ // Otherwise parse it as bool
+ } else {
+ value, err = strconv.ParseBool(string(valueBuf))
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err))
+ }
+ }
+ fields[string(name)] = value
+ i += 1
+ }
+ return fields
+}
+
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// representation
+// NOTE: uint64 is specifically not supported due to potential overflow when we decode
+// again later to an int64
+func (p Fields) MarshalBinary() []byte {
+ b := []byte{}
+ keys := make([]string, len(p))
+ i := 0
+ for k := range p {
+ keys[i] = k
+ i += 1
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := p[k]
+ b = append(b, []byte(escape.String(k))...)
+ b = append(b, '=')
+ switch t := v.(type) {
+ case int:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case int8:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case int16:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case int32:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case int64:
+ b = append(b, []byte(strconv.FormatInt(t, 10))...)
+ b = append(b, 'i')
+ case uint:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case uint8:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case uint16:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case uint32:
+ b = append(b, []byte(strconv.FormatInt(int64(t), 10))...)
+ b = append(b, 'i')
+ case float32:
+ val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32))
+ b = append(b, val...)
+ case float64:
+ val := []byte(strconv.FormatFloat(t, 'f', -1, 64))
+ b = append(b, val...)
+ case bool:
+ b = append(b, []byte(strconv.FormatBool(t))...)
+ case []byte:
+ b = append(b, t...)
+ case string:
+ b = append(b, '"')
+ b = append(b, []byte(escapeStringField(t))...)
+ b = append(b, '"')
+ case nil:
+ // skip
+ default:
+ // Can't determine the type, so convert to string
+ b = append(b, '"')
+ b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...)
+ b = append(b, '"')
+ }
+ b = append(b, ',')
+ }
+ if len(b) > 0 {
+ return b[0 : len(b)-1]
+ }
+ return b
+}
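+
+// For example, Fields{"n": 42, "s": "hi"}.MarshalBinary() produces the bytes
+// n=42i,s="hi": keys are sorted, integers carry an 'i' suffix, strings are
+// quoted and escaped, and the pairs are joined with commas.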
+
+type indexedSlice struct {
+ indices []int
+ b []byte
+}
+
+func (s *indexedSlice) Less(i, j int) bool {
+ _, a := scanTo(s.b, s.indices[i], '=')
+ _, b := scanTo(s.b, s.indices[j], '=')
+ return bytes.Compare(a, b) < 0
+}
+
+func (s *indexedSlice) Swap(i, j int) {
+ s.indices[i], s.indices[j] = s.indices[j], s.indices[i]
+}
+
+func (s *indexedSlice) Len() int {
+ return len(s.indices)
+}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/models/rows.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/models/rows.go
new file mode 100644
index 0000000..a4350fa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/models/rows.go
@@ -0,0 +1,59 @@
+package models
+
+import (
+ "hash/fnv"
+ "sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+ Name string `json:"name,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Columns []string `json:"columns,omitempty"`
+ Values [][]interface{} `json:"values,omitempty"`
+ Err error `json:"err,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+ return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+ h := fnv.New64a()
+ keys := r.tagsKeys()
+ for _, k := range keys {
+ h.Write([]byte(k))
+ h.Write([]byte(r.Tags[k]))
+ }
+ return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+ a := make([]string, 0, len(r.Tags))
+ for k := range r.Tags {
+ a = append(a, k)
+ }
+ sort.Strings(a)
+ return a
+}
+
+// Rows represents a collection of rows, sortable by name and then tag set.
+type Rows []*Row
+
+func (p Rows) Len() int { return len(p) }
+
+func (p Rows) Less(i, j int) bool {
+ // Sort by name first.
+ if p[i].Name != p[j].Name {
+ return p[i].Name < p[j].Name
+ }
+
+ // Sort by tag set hash. Tags don't have a meaningful sort order so we
+ // just compute a hash and sort by that instead. This allows the tests
+ // to receive rows in a predictable order every time.
+ return p[i].tagsHash() < p[j].tagsHash()
+}
+
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/bytes.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/bytes.go
new file mode 100644
index 0000000..15e9cf2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/bytes.go
@@ -0,0 +1,45 @@
+package escape
+
+import "bytes"
+
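+// Bytes replaces every occurrence of an escapable character (',', '"', ' ',
+// '=') in in with its backslash-escaped form from Codes.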
+func Bytes(in []byte) []byte {
+ for b, esc := range Codes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
+
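+// Unescape reverses Bytes: it drops the backslash before any escaped ',',
+// '"', ' ' or '=' and copies all other bytes through unchanged.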
+func Unescape(in []byte) []byte {
+ i := 0
+ inLen := len(in)
+ var out []byte
+
+ for {
+ if i >= inLen {
+ break
+ }
+ if in[i] == '\\' && i+1 < inLen {
+ switch in[i+1] {
+ case ',':
+ out = append(out, ',')
+ i += 2
+ continue
+ case '"':
+ out = append(out, '"')
+ i += 2
+ continue
+ case ' ':
+ out = append(out, ' ')
+ i += 2
+ continue
+ case '=':
+ out = append(out, '=')
+ i += 2
+ continue
+ }
+ }
+ out = append(out, in[i])
+ i += 1
+ }
+ return out
+}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/strings.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/strings.go
new file mode 100644
index 0000000..330fbf4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/strings.go
@@ -0,0 +1,34 @@
+package escape
+
+import "strings"
+
+var (
+ Codes = map[byte][]byte{
+ ',': []byte(`\,`),
+ '"': []byte(`\"`),
+ ' ': []byte(`\ `),
+ '=': []byte(`\=`),
+ }
+
+ codesStr = map[string]string{}
+)
+
+func init() {
+ for k, v := range Codes {
+ codesStr[string(k)] = string(v)
+ }
+}
+
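+// UnescapeString reverses String, replacing each escaped sequence in in with
+// the character it encodes.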
+func UnescapeString(in string) string {
+ for b, esc := range codesStr {
+ in = strings.Replace(in, esc, b, -1)
+ }
+ return in
+}
+
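+// String backslash-escapes each special character in in; for example,
+// String("cpu load") returns the text cpu\ load.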
+func String(in string) string {
+ for b, esc := range codesStr {
+ in = strings.Replace(in, b, esc, -1)
+ }
+ return in
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore
new file mode 100644
index 0000000..531fcc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore
@@ -0,0 +1,4 @@
+jpgo
+jmespath-fuzz.zip
+cpu.out
+go-jmespath.test
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml
new file mode 100644
index 0000000..1f98077
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+sudo: false
+
+go:
+ - 1.4
+
+install: go get -v -t ./...
+script: make test
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 0000000..b03310a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 0000000..a828d28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+ @echo "Please use \`make ' where is one of"
+ @echo " test to run all the tests"
+ @echo " build to build the library and jp executable"
+ @echo " generate to run codegen"
+
+
+generate:
+ go generate ./...
+
+build:
+ rm -f $(CMD)
+ go build ./...
+ rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+ mv cmd/$(CMD)/$(CMD) .
+
+test:
+ go test -v ./...
+
+check:
+ go vet ./...
+ @echo "golint ./..."
+ @lint=`golint ./...`; \
+ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+ echo "$$lint"; \
+ if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+ go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+ go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+ go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+ go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+ go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 0000000..187ef67
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
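+
+## Usage
+
+A minimal sketch (error handling elided; see api.go for the full API):
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+func main() {
+ data := map[string]interface{}{"foo": map[string]interface{}{"bar": "baz"}}
+ result, _ := jmespath.Search("foo.bar", data)
+ fmt.Println(result) // baz
+}
+```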
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 0000000..9cfa988
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+ ast ASTNode
+ intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+ return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+ jmespath, err := Compile(expression)
+ if err != nil {
+ panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+ }
+ return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+ return jp.intr.Execute(jp.ast, data)
+}
+
+// Search compiles and evaluates a JMESPath expression against the input data,
+// returning the result in a single call.
+func Search(expression string, data interface{}) (interface{}, error) {
+ intr := newInterpreter()
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(ast, data)
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 0000000..1cd2d23
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json
new file mode 100644
index 0000000..d550e96
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json
@@ -0,0 +1,96 @@
+[{
+ "given":
+ {"foo": {"bar": {"baz": "correct"}}},
+ "cases": [
+ {
+ "expression": "foo",
+ "result": {"bar": {"baz": "correct"}}
+ },
+ {
+ "expression": "foo.bar",
+ "result": {"baz": "correct"}
+ },
+ {
+ "expression": "foo.bar.baz",
+ "result": "correct"
+ },
+ {
+ "expression": "foo\n.\nbar\n.baz",
+ "result": "correct"
+ },
+ {
+ "expression": "foo.bar.baz.bad",
+ "result": null
+ },
+ {
+ "expression": "foo.bar.bad",
+ "result": null
+ },
+ {
+ "expression": "foo.bad",
+ "result": null
+ },
+ {
+ "expression": "bad",
+ "result": null
+ },
+ {
+ "expression": "bad.morebad.morebad",
+ "result": null
+ }
+ ]
+},
+{
+ "given":
+ {"foo": {"bar": ["one", "two", "three"]}},
+ "cases": [
+ {
+ "expression": "foo",
+ "result": {"bar": ["one", "two", "three"]}
+ },
+ {
+ "expression": "foo.bar",
+ "result": ["one", "two", "three"]
+ }
+ ]
+},
+{
+ "given": ["one", "two", "three"],
+ "cases": [
+ {
+ "expression": "one",
+ "result": null
+ },
+ {
+ "expression": "two",
+ "result": null
+ },
+ {
+ "expression": "three",
+ "result": null
+ },
+ {
+ "expression": "one.two",
+ "result": null
+ }
+ ]
+},
+{
+ "given":
+ {"foo": {"1": ["one", "two", "three"], "-1": "bar"}},
+ "cases": [
+ {
+ "expression": "foo.\"1\"",
+ "result": ["one", "two", "three"]
+ },
+ {
+ "expression": "foo.\"1\"[0]",
+ "result": "one"
+ },
+ {
+ "expression": "foo.\"-1\"",
+ "result": "bar"
+ }
+ ]
+}
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json
new file mode 100644
index 0000000..e3fa196
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json
@@ -0,0 +1,257 @@
+[
+ {
+ "given": {
+ "outer": {
+ "foo": "foo",
+ "bar": "bar",
+ "baz": "baz"
+ }
+ },
+ "cases": [
+ {
+ "expression": "outer.foo || outer.bar",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.foo||outer.bar",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.bar || outer.baz",
+ "result": "bar"
+ },
+ {
+ "expression": "outer.bar||outer.baz",
+ "result": "bar"
+ },
+ {
+ "expression": "outer.bad || outer.foo",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.bad||outer.foo",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.foo || outer.bad",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.foo||outer.bad",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.bad || outer.alsobad",
+ "result": null
+ },
+ {
+ "expression": "outer.bad||outer.alsobad",
+ "result": null
+ }
+ ]
+ },
+ {
+ "given": {
+ "outer": {
+ "foo": "foo",
+ "bool": false,
+ "empty_list": [],
+ "empty_string": ""
+ }
+ },
+ "cases": [
+ {
+ "expression": "outer.empty_string || outer.foo",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo",
+ "result": "foo"
+ }
+ ]
+ },
+ {
+ "given": {
+ "True": true,
+ "False": false,
+ "Number": 5,
+ "EmptyList": [],
+ "Zero": 0
+ },
+ "cases": [
+ {
+ "expression": "True && False",
+ "result": false
+ },
+ {
+ "expression": "False && True",
+ "result": false
+ },
+ {
+ "expression": "True && True",
+ "result": true
+ },
+ {
+ "expression": "False && False",
+ "result": false
+ },
+ {
+ "expression": "True && Number",
+ "result": 5
+ },
+ {
+ "expression": "Number && True",
+ "result": true
+ },
+ {
+ "expression": "Number && False",
+ "result": false
+ },
+ {
+ "expression": "Number && EmptyList",
+ "result": []
+ },
+ {
+ "expression": "Number && True",
+ "result": true
+ },
+ {
+ "expression": "EmptyList && True",
+ "result": []
+ },
+ {
+ "expression": "EmptyList && False",
+ "result": []
+ },
+ {
+ "expression": "True || False",
+ "result": true
+ },
+ {
+ "expression": "True || True",
+ "result": true
+ },
+ {
+ "expression": "False || True",
+ "result": true
+ },
+ {
+ "expression": "False || False",
+ "result": false
+ },
+ {
+ "expression": "Number || EmptyList",
+ "result": 5
+ },
+ {
+ "expression": "Number || True",
+ "result": 5
+ },
+ {
+ "expression": "Number || True && False",
+ "result": 5
+ },
+ {
+ "expression": "(Number || True) && False",
+ "result": false
+ },
+ {
+ "expression": "Number || (True && False)",
+ "result": 5
+ },
+ {
+ "expression": "!True",
+ "result": false
+ },
+ {
+ "expression": "!False",
+ "result": true
+ },
+ {
+ "expression": "!Number",
+ "result": false
+ },
+ {
+ "expression": "!EmptyList",
+ "result": true
+ },
+ {
+ "expression": "True && !False",
+ "result": true
+ },
+ {
+ "expression": "True && !EmptyList",
+ "result": true
+ },
+ {
+ "expression": "!False && !EmptyList",
+ "result": true
+ },
+ {
+ "expression": "!(True && False)",
+ "result": true
+ },
+ {
+ "expression": "!Zero",
+ "result": false
+ },
+ {
+ "expression": "!!Zero",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "one": 1,
+ "two": 2,
+ "three": 3
+ },
+ "cases": [
+ {
+ "expression": "one < two",
+ "result": true
+ },
+ {
+ "expression": "one <= two",
+ "result": true
+ },
+ {
+ "expression": "one == one",
+ "result": true
+ },
+ {
+ "expression": "one == two",
+ "result": false
+ },
+ {
+ "expression": "one > two",
+ "result": false
+ },
+ {
+ "expression": "one >= two",
+ "result": false
+ },
+ {
+ "expression": "one != two",
+ "result": true
+ },
+ {
+ "expression": "one < two && three > one",
+ "result": true
+ },
+ {
+ "expression": "one < two || three > one",
+ "result": true
+ },
+ {
+ "expression": "one < two || three < one",
+ "result": true
+ },
+ {
+ "expression": "two < one || three < one",
+ "result": false
+ }
+ ]
+ }
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json
new file mode 100644
index 0000000..0c26248
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json
@@ -0,0 +1,25 @@
+[
+ {
+ "given": {
+ "foo": [{"name": "a"}, {"name": "b"}],
+ "bar": {"baz": "qux"}
+ },
+ "cases": [
+ {
+ "expression": "@",
+ "result": {
+ "foo": [{"name": "a"}, {"name": "b"}],
+ "bar": {"baz": "qux"}
+ }
+ },
+ {
+ "expression": "@.bar",
+ "result": {"baz": "qux"}
+ },
+ {
+ "expression": "@.foo[0]",
+ "result": {"name": "a"}
+ }
+ ]
+ }
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json
new file mode 100644
index 0000000..4a62d95
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json
@@ -0,0 +1,46 @@
+[{
+ "given": {
+ "foo.bar": "dot",
+ "foo bar": "space",
+ "foo\nbar": "newline",
+ "foo\"bar": "doublequote",
+ "c:\\\\windows\\path": "windows",
+ "/unix/path": "unix",
+ "\"\"\"": "threequotes",
+ "bar": {"baz": "qux"}
+ },
+ "cases": [
+ {
+ "expression": "\"foo.bar\"",
+ "result": "dot"
+ },
+ {
+ "expression": "\"foo bar\"",
+ "result": "space"
+ },
+ {
+ "expression": "\"foo\\nbar\"",
+ "result": "newline"
+ },
+ {
+ "expression": "\"foo\\\"bar\"",
+ "result": "doublequote"
+ },
+ {
+ "expression": "\"c:\\\\\\\\windows\\\\path\"",
+ "result": "windows"
+ },
+ {
+ "expression": "\"/unix/path\"",
+ "result": "unix"
+ },
+ {
+ "expression": "\"\\\"\\\"\\\"\"",
+ "result": "threequotes"
+ },
+ {
+ "expression": "\"bar\".\"baz\"",
+ "result": "qux"
+ }
+ ]
+}]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json
new file mode 100644
index 0000000..5b9f52b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json
@@ -0,0 +1,468 @@
+[
+ {
+ "given": {"foo": [{"name": "a"}, {"name": "b"}]},
+ "cases": [
+ {
+ "comment": "Matching a literal",
+ "expression": "foo[?name == 'a']",
+ "result": [{"name": "a"}]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [0, 1], "bar": [2, 3]},
+ "cases": [
+ {
+ "comment": "Matching a literal",
+ "expression": "*[?[0] == `0`]",
+ "result": [[], []]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"first": "foo", "last": "bar"},
+ {"first": "foo", "last": "foo"},
+ {"first": "foo", "last": "baz"}]},
+ "cases": [
+ {
+ "comment": "Matching an expression",
+ "expression": "foo[?first == last]",
+ "result": [{"first": "foo", "last": "foo"}]
+ },
+ {
+ "comment": "Verify projection created from filter",
+ "expression": "foo[?first == last].first",
+ "result": ["foo"]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"age": 20},
+ {"age": 25},
+ {"age": 30}]},
+ "cases": [
+ {
+ "comment": "Greater than with a number",
+ "expression": "foo[?age > `25`]",
+ "result": [{"age": 30}]
+ },
+ {
+ "expression": "foo[?age >= `25`]",
+ "result": [{"age": 25}, {"age": 30}]
+ },
+ {
+ "comment": "Greater than with a number",
+ "expression": "foo[?age > `30`]",
+ "result": []
+ },
+ {
+ "comment": "Greater than with a number",
+ "expression": "foo[?age < `25`]",
+ "result": [{"age": 20}]
+ },
+ {
+ "comment": "Greater than with a number",
+ "expression": "foo[?age <= `25`]",
+ "result": [{"age": 20}, {"age": 25}]
+ },
+ {
+ "comment": "Greater than with a number",
+ "expression": "foo[?age < `20`]",
+ "result": []
+ },
+ {
+ "expression": "foo[?age == `20`]",
+ "result": [{"age": 20}]
+ },
+ {
+ "expression": "foo[?age != `20`]",
+ "result": [{"age": 25}, {"age": 30}]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"top": {"name": "a"}},
+ {"top": {"name": "b"}}]},
+ "cases": [
+ {
+ "comment": "Filter with subexpression",
+ "expression": "foo[?top.name == 'a']",
+ "result": [{"top": {"name": "a"}}]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"top": {"first": "foo", "last": "bar"}},
+ {"top": {"first": "foo", "last": "foo"}},
+ {"top": {"first": "foo", "last": "baz"}}]},
+ "cases": [
+ {
+ "comment": "Matching an expression",
+ "expression": "foo[?top.first == top.last]",
+ "result": [{"top": {"first": "foo", "last": "foo"}}]
+ },
+ {
+ "comment": "Matching a JSON array",
+ "expression": "foo[?top == `{\"first\": \"foo\", \"last\": \"bar\"}`]",
+ "result": [{"top": {"first": "foo", "last": "bar"}}]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [
+ {"key": true},
+ {"key": false},
+ {"key": 0},
+ {"key": 1},
+ {"key": [0]},
+ {"key": {"bar": [0]}},
+ {"key": null},
+ {"key": [1]},
+ {"key": {"a":2}}
+ ]},
+ "cases": [
+ {
+ "expression": "foo[?key == `true`]",
+ "result": [{"key": true}]
+ },
+ {
+ "expression": "foo[?key == `false`]",
+ "result": [{"key": false}]
+ },
+ {
+ "expression": "foo[?key == `0`]",
+ "result": [{"key": 0}]
+ },
+ {
+ "expression": "foo[?key == `1`]",
+ "result": [{"key": 1}]
+ },
+ {
+ "expression": "foo[?key == `[0]`]",
+ "result": [{"key": [0]}]
+ },
+ {
+ "expression": "foo[?key == `{\"bar\": [0]}`]",
+ "result": [{"key": {"bar": [0]}}]
+ },
+ {
+ "expression": "foo[?key == `null`]",
+ "result": [{"key": null}]
+ },
+ {
+ "expression": "foo[?key == `[1]`]",
+ "result": [{"key": [1]}]
+ },
+ {
+ "expression": "foo[?key == `{\"a\":2}`]",
+ "result": [{"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?`true` == key]",
+ "result": [{"key": true}]
+ },
+ {
+ "expression": "foo[?`false` == key]",
+ "result": [{"key": false}]
+ },
+ {
+ "expression": "foo[?`0` == key]",
+ "result": [{"key": 0}]
+ },
+ {
+ "expression": "foo[?`1` == key]",
+ "result": [{"key": 1}]
+ },
+ {
+ "expression": "foo[?`[0]` == key]",
+ "result": [{"key": [0]}]
+ },
+ {
+ "expression": "foo[?`{\"bar\": [0]}` == key]",
+ "result": [{"key": {"bar": [0]}}]
+ },
+ {
+ "expression": "foo[?`null` == key]",
+ "result": [{"key": null}]
+ },
+ {
+ "expression": "foo[?`[1]` == key]",
+ "result": [{"key": [1]}]
+ },
+ {
+ "expression": "foo[?`{\"a\":2}` == key]",
+ "result": [{"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?key != `true`]",
+ "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?key != `false`]",
+ "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?key != `0`]",
+ "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?key != `1`]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?key != `null`]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?key != `[1]`]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?key != `{\"a\":2}`]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}]
+ },
+ {
+ "expression": "foo[?`true` != key]",
+ "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?`false` != key]",
+ "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?`0` != key]",
+ "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?`1` != key]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?`null` != key]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?`[1]` != key]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}]
+ },
+ {
+ "expression": "foo[?`{\"a\":2}` != key]",
+ "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]},
+ {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}]
+ }
+ ]
+ },
+ {
+ "given": {"reservations": [
+ {"instances": [
+ {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3},
+ {"foo": 1, "bar": 2}, {"foo": 2, "bar": 1}]}]},
+ "cases": [
+ {
+ "expression": "reservations[].instances[?bar==`1`]",
+ "result": [[{"foo": 2, "bar": 1}]]
+ },
+ {
+ "expression": "reservations[*].instances[?bar==`1`]",
+ "result": [[{"foo": 2, "bar": 1}]]
+ },
+ {
+ "expression": "reservations[].instances[?bar==`1`][]",
+ "result": [{"foo": 2, "bar": 1}]
+ }
+ ]
+ },
+ {
+ "given": {
+ "baz": "other",
+ "foo": [
+ {"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 1, "baz": 2}
+ ]
+ },
+ "cases": [
+ {
+ "expression": "foo[?bar==`1`].bar[0]",
+ "result": []
+ }
+ ]
+ },
+ {
+ "given": {
+ "foo": [
+ {"a": 1, "b": {"c": "x"}},
+ {"a": 1, "b": {"c": "y"}},
+ {"a": 1, "b": {"c": "z"}},
+ {"a": 2, "b": {"c": "z"}},
+ {"a": 1, "baz": 2}
+ ]
+ },
+ "cases": [
+ {
+ "expression": "foo[?a==`1`].b.c",
+ "result": ["x", "y", "z"]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"name": "a"}, {"name": "b"}, {"name": "c"}]},
+ "cases": [
+ {
+ "comment": "Filter with or expression",
+ "expression": "foo[?name == 'a' || name == 'b']",
+ "result": [{"name": "a"}, {"name": "b"}]
+ },
+ {
+ "expression": "foo[?name == 'a' || name == 'e']",
+ "result": [{"name": "a"}]
+ },
+ {
+ "expression": "foo[?name == 'a' || name == 'b' || name == 'c']",
+ "result": [{"name": "a"}, {"name": "b"}, {"name": "c"}]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"a": 1, "b": 2}, {"a": 1, "b": 3}]},
+ "cases": [
+ {
+ "comment": "Filter with and expression",
+ "expression": "foo[?a == `1` && b == `2`]",
+ "result": [{"a": 1, "b": 2}]
+ },
+ {
+ "expression": "foo[?a == `1` && b == `4`]",
+ "result": []
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]},
+ "cases": [
+ {
+ "comment": "Filter with Or and And expressions",
+ "expression": "foo[?c == `3` || a == `1` && b == `4`]",
+ "result": [{"a": 1, "b": 2, "c": 3}]
+ },
+ {
+ "expression": "foo[?b == `2` || a == `3` && b == `4`]",
+ "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]
+ },
+ {
+ "expression": "foo[?a == `3` && b == `4` || b == `2`]",
+ "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]
+ },
+ {
+ "expression": "foo[?(a == `3` && b == `4`) || b == `2`]",
+ "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]
+ },
+ {
+ "expression": "foo[?((a == `3` && b == `4`)) || b == `2`]",
+ "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]
+ },
+ {
+ "expression": "foo[?a == `3` && (b == `4` || b == `2`)]",
+ "result": [{"a": 3, "b": 4}]
+ },
+ {
+ "expression": "foo[?a == `3` && ((b == `4` || b == `2`))]",
+ "result": [{"a": 3, "b": 4}]
+ }
+ ]
+ },
+ {
+ "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]},
+ "cases": [
+ {
+ "comment": "Verify precedence of or/and expressions",
+ "expression": "foo[?a == `1` || b ==`2` && c == `5`]",
+ "result": [{"a": 1, "b": 2, "c": 3}]
+ },
+ {
+ "comment": "Parentheses can alter precedence",
+ "expression": "foo[?(a == `1` || b ==`2`) && c == `5`]",
+ "result": []
+ },
+ {
+ "comment": "Not expressions combined with and/or",
+ "expression": "foo[?!(a == `1` || b ==`2`)]",
+ "result": [{"a": 3, "b": 4}]
+ }
+ ]
+ },
+ {
+ "given": {
+ "foo": [
+ {"key": true},
+ {"key": false},
+ {"key": []},
+ {"key": {}},
+ {"key": [0]},
+ {"key": {"a": "b"}},
+ {"key": 0},
+ {"key": 1},
+ {"key": null},
+ {"notkey": true}
+ ]
+ },
+ "cases": [
+ {
+ "comment": "Unary filter expression",
+ "expression": "foo[?key]",
+ "result": [
+ {"key": true}, {"key": [0]}, {"key": {"a": "b"}},
+ {"key": 0}, {"key": 1}
+ ]
+ },
+ {
+ "comment": "Unary not filter expression",
+ "expression": "foo[?!key]",
+ "result": [
+ {"key": false}, {"key": []}, {"key": {}},
+ {"key": null}, {"notkey": true}
+ ]
+ },
+ {
+ "comment": "Equality with null RHS",
+ "expression": "foo[?key == `null`]",
+ "result": [
+ {"key": null}, {"notkey": true}
+ ]
+ }
+ ]
+ },
+ {
+ "given": {
+ "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ "cases": [
+ {
+ "comment": "Using @ in a filter expression",
+ "expression": "foo[?@ < `5`]",
+ "result": [0, 1, 2, 3, 4]
+ },
+ {
+ "comment": "Using @ in a filter expression",
+ "expression": "foo[?`5` > @]",
+ "result": [0, 1, 2, 3, 4]
+ },
+ {
+ "comment": "Using @ in a filter expression",
+ "expression": "foo[?@ == @]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ }
+ ]
+ }
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json
new file mode 100644
index 0000000..8b8db36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json
@@ -0,0 +1,825 @@
+[{
+ "given":
+ {
+ "foo": -1,
+ "zero": 0,
+ "numbers": [-1, 3, 4, 5],
+ "array": [-1, 3, 4, 5, "a", "100"],
+ "strings": ["a", "b", "c"],
+ "decimals": [1.01, 1.2, -1.5],
+ "str": "Str",
+ "false": false,
+ "empty_list": [],
+ "empty_hash": {},
+ "objects": {"foo": "bar", "bar": "baz"},
+ "null_key": null
+ },
+ "cases": [
+ {
+ "expression": "abs(foo)",
+ "result": 1
+ },
+ {
+ "expression": "abs(foo)",
+ "result": 1
+ },
+ {
+ "expression": "abs(str)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "abs(array[1])",
+ "result": 3
+ },
+ {
+ "expression": "abs(array[1])",
+ "result": 3
+ },
+ {
+ "expression": "abs(`false`)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "abs(`-24`)",
+ "result": 24
+ },
+ {
+ "expression": "abs(`-24`)",
+ "result": 24
+ },
+ {
+ "expression": "abs(`1`, `2`)",
+ "error": "invalid-arity"
+ },
+ {
+ "expression": "abs()",
+ "error": "invalid-arity"
+ },
+ {
+ "expression": "unknown_function(`1`, `2`)",
+ "error": "unknown-function"
+ },
+ {
+ "expression": "avg(numbers)",
+ "result": 2.75
+ },
+ {
+ "expression": "avg(array)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "avg('abc')",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "avg(foo)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "avg(@)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "avg(strings)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "ceil(`1.2`)",
+ "result": 2
+ },
+ {
+ "expression": "ceil(decimals[0])",
+ "result": 2
+ },
+ {
+ "expression": "ceil(decimals[1])",
+ "result": 2
+ },
+ {
+ "expression": "ceil(decimals[2])",
+ "result": -1
+ },
+ {
+ "expression": "ceil('string')",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "contains('abc', 'a')",
+ "result": true
+ },
+ {
+ "expression": "contains('abc', 'd')",
+ "result": false
+ },
+ {
+ "expression": "contains(`false`, 'd')",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "contains(strings, 'a')",
+ "result": true
+ },
+ {
+ "expression": "contains(decimals, `1.2`)",
+ "result": true
+ },
+ {
+ "expression": "contains(decimals, `false`)",
+ "result": false
+ },
+ {
+ "expression": "ends_with(str, 'r')",
+ "result": true
+ },
+ {
+ "expression": "ends_with(str, 'tr')",
+ "result": true
+ },
+ {
+ "expression": "ends_with(str, 'Str')",
+ "result": true
+ },
+ {
+ "expression": "ends_with(str, 'SStr')",
+ "result": false
+ },
+ {
+ "expression": "ends_with(str, 'foo')",
+ "result": false
+ },
+ {
+ "expression": "ends_with(str, `0`)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "floor(`1.2`)",
+ "result": 1
+ },
+ {
+ "expression": "floor('string')",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "floor(decimals[0])",
+ "result": 1
+ },
+ {
+ "expression": "floor(foo)",
+ "result": -1
+ },
+ {
+ "expression": "floor(str)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "length('abc')",
+ "result": 3
+ },
+ {
+ "expression": "length('✓foo')",
+ "result": 4
+ },
+ {
+ "expression": "length('')",
+ "result": 0
+ },
+ {
+ "expression": "length(@)",
+ "result": 12
+ },
+ {
+ "expression": "length(strings[0])",
+ "result": 1
+ },
+ {
+ "expression": "length(str)",
+ "result": 3
+ },
+ {
+ "expression": "length(array)",
+ "result": 6
+ },
+ {
+ "expression": "length(objects)",
+ "result": 2
+ },
+ {
+ "expression": "length(`false`)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "length(foo)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "length(strings[0])",
+ "result": 1
+ },
+ {
+ "expression": "max(numbers)",
+ "result": 5
+ },
+ {
+ "expression": "max(decimals)",
+ "result": 1.2
+ },
+ {
+ "expression": "max(strings)",
+ "result": "c"
+ },
+ {
+ "expression": "max(abc)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "max(array)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "max(decimals)",
+ "result": 1.2
+ },
+ {
+ "expression": "max(empty_list)",
+ "result": null
+ },
+ {
+ "expression": "merge(`{}`)",
+ "result": {}
+ },
+ {
+ "expression": "merge(`{}`, `{}`)",
+ "result": {}
+ },
+ {
+ "expression": "merge(`{\"a\": 1}`, `{\"b\": 2}`)",
+ "result": {"a": 1, "b": 2}
+ },
+ {
+ "expression": "merge(`{\"a\": 1}`, `{\"a\": 2}`)",
+ "result": {"a": 2}
+ },
+ {
+ "expression": "merge(`{\"a\": 1, \"b\": 2}`, `{\"a\": 2, \"c\": 3}`, `{\"d\": 4}`)",
+ "result": {"a": 2, "b": 2, "c": 3, "d": 4}
+ },
+ {
+ "expression": "min(numbers)",
+ "result": -1
+ },
+ {
+ "expression": "min(decimals)",
+ "result": -1.5
+ },
+ {
+ "expression": "min(abc)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "min(array)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "min(empty_list)",
+ "result": null
+ },
+ {
+ "expression": "min(decimals)",
+ "result": -1.5
+ },
+ {
+ "expression": "min(strings)",
+ "result": "a"
+ },
+ {
+ "expression": "type('abc')",
+ "result": "string"
+ },
+ {
+ "expression": "type(`1.0`)",
+ "result": "number"
+ },
+ {
+ "expression": "type(`2`)",
+ "result": "number"
+ },
+ {
+ "expression": "type(`true`)",
+ "result": "boolean"
+ },
+ {
+ "expression": "type(`false`)",
+ "result": "boolean"
+ },
+ {
+ "expression": "type(`null`)",
+ "result": "null"
+ },
+ {
+ "expression": "type(`[0]`)",
+ "result": "array"
+ },
+ {
+ "expression": "type(`{\"a\": \"b\"}`)",
+ "result": "object"
+ },
+ {
+ "expression": "type(@)",
+ "result": "object"
+ },
+ {
+ "expression": "sort(keys(objects))",
+ "result": ["bar", "foo"]
+ },
+ {
+ "expression": "keys(foo)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "keys(strings)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "keys(`false`)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sort(values(objects))",
+ "result": ["bar", "baz"]
+ },
+ {
+ "expression": "keys(empty_hash)",
+ "result": []
+ },
+ {
+ "expression": "values(foo)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "join(', ', strings)",
+ "result": "a, b, c"
+ },
+ {
+ "expression": "join(', ', strings)",
+ "result": "a, b, c"
+ },
+ {
+ "expression": "join(',', `[\"a\", \"b\"]`)",
+ "result": "a,b"
+ },
+ {
+ "expression": "join(',', `[\"a\", 0]`)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "join(', ', str)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "join('|', strings)",
+ "result": "a|b|c"
+ },
+ {
+ "expression": "join(`2`, strings)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "join('|', decimals)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "join('|', decimals[].to_string(@))",
+ "result": "1.01|1.2|-1.5"
+ },
+ {
+ "expression": "join('|', empty_list)",
+ "result": ""
+ },
+ {
+ "expression": "reverse(numbers)",
+ "result": [5, 4, 3, -1]
+ },
+ {
+ "expression": "reverse(array)",
+ "result": ["100", "a", 5, 4, 3, -1]
+ },
+ {
+ "expression": "reverse(`[]`)",
+ "result": []
+ },
+ {
+ "expression": "reverse('')",
+ "result": ""
+ },
+ {
+ "expression": "reverse('hello world')",
+ "result": "dlrow olleh"
+ },
+ {
+ "expression": "starts_with(str, 'S')",
+ "result": true
+ },
+ {
+ "expression": "starts_with(str, 'St')",
+ "result": true
+ },
+ {
+ "expression": "starts_with(str, 'Str')",
+ "result": true
+ },
+ {
+ "expression": "starts_with(str, 'String')",
+ "result": false
+ },
+ {
+ "expression": "starts_with(str, `0`)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sum(numbers)",
+ "result": 11
+ },
+ {
+ "expression": "sum(decimals)",
+ "result": 0.71
+ },
+ {
+ "expression": "sum(array)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sum(array[].to_number(@))",
+ "result": 111
+ },
+ {
+ "expression": "sum(`[]`)",
+ "result": 0
+ },
+ {
+ "expression": "to_array('foo')",
+ "result": ["foo"]
+ },
+ {
+ "expression": "to_array(`0`)",
+ "result": [0]
+ },
+ {
+ "expression": "to_array(objects)",
+ "result": [{"foo": "bar", "bar": "baz"}]
+ },
+ {
+ "expression": "to_array(`[1, 2, 3]`)",
+ "result": [1, 2, 3]
+ },
+ {
+ "expression": "to_array(false)",
+ "result": [false]
+ },
+ {
+ "expression": "to_string('foo')",
+ "result": "foo"
+ },
+ {
+ "expression": "to_string(`1.2`)",
+ "result": "1.2"
+ },
+ {
+ "expression": "to_string(`[0, 1]`)",
+ "result": "[0,1]"
+ },
+ {
+ "expression": "to_number('1.0')",
+ "result": 1.0
+ },
+ {
+ "expression": "to_number('1.1')",
+ "result": 1.1
+ },
+ {
+ "expression": "to_number('4')",
+ "result": 4
+ },
+ {
+ "expression": "to_number('notanumber')",
+ "result": null
+ },
+ {
+ "expression": "to_number(`false`)",
+ "result": null
+ },
+ {
+ "expression": "to_number(`null`)",
+ "result": null
+ },
+ {
+ "expression": "to_number(`[0]`)",
+ "result": null
+ },
+ {
+ "expression": "to_number(`{\"foo\": 0}`)",
+ "result": null
+ },
+ {
+ "expression": "\"to_string\"(`1.0`)",
+ "error": "syntax"
+ },
+ {
+ "expression": "sort(numbers)",
+ "result": [-1, 3, 4, 5]
+ },
+ {
+ "expression": "sort(strings)",
+ "result": ["a", "b", "c"]
+ },
+ {
+ "expression": "sort(decimals)",
+ "result": [-1.5, 1.01, 1.2]
+ },
+ {
+ "expression": "sort(array)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sort(abc)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sort(empty_list)",
+ "result": []
+ },
+ {
+ "expression": "sort(@)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "not_null(unknown_key, str)",
+ "result": "Str"
+ },
+ {
+ "expression": "not_null(unknown_key, foo.bar, empty_list, str)",
+ "result": []
+ },
+ {
+ "expression": "not_null(unknown_key, null_key, empty_list, str)",
+ "result": []
+ },
+ {
+ "expression": "not_null(all, expressions, are_null)",
+ "result": null
+ },
+ {
+ "expression": "not_null()",
+ "error": "invalid-arity"
+ },
+ {
+ "description": "function projection on single arg function",
+ "expression": "numbers[].to_string(@)",
+ "result": ["-1", "3", "4", "5"]
+ },
+ {
+ "description": "function projection on single arg function",
+ "expression": "array[].to_number(@)",
+ "result": [-1, 3, 4, 5, 100]
+ }
+ ]
+}, {
+ "given":
+ {
+ "foo": [
+ {"b": "b", "a": "a"},
+ {"c": "c", "b": "b"},
+ {"d": "d", "c": "c"},
+ {"e": "e", "d": "d"},
+ {"f": "f", "e": "e"}
+ ]
+ },
+ "cases": [
+ {
+ "description": "function projection on variadic function",
+ "expression": "foo[].not_null(f, e, d, c, b, a)",
+ "result": ["b", "c", "d", "e", "f"]
+ }
+ ]
+}, {
+ "given":
+ {
+ "people": [
+ {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"},
+ {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"},
+ {"age": 30, "age_str": "30", "bool": true, "name": "c"},
+ {"age": 50, "age_str": "50", "bool": false, "name": "d"},
+ {"age": 10, "age_str": "10", "bool": true, "name": 3}
+ ]
+ },
+ "cases": [
+ {
+ "description": "sort by field expression",
+ "expression": "sort_by(people, &age)",
+ "result": [
+ {"age": 10, "age_str": "10", "bool": true, "name": 3},
+ {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"},
+ {"age": 30, "age_str": "30", "bool": true, "name": "c"},
+ {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"},
+ {"age": 50, "age_str": "50", "bool": false, "name": "d"}
+ ]
+ },
+ {
+ "expression": "sort_by(people, &age_str)",
+ "result": [
+ {"age": 10, "age_str": "10", "bool": true, "name": 3},
+ {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"},
+ {"age": 30, "age_str": "30", "bool": true, "name": "c"},
+ {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"},
+ {"age": 50, "age_str": "50", "bool": false, "name": "d"}
+ ]
+ },
+ {
+ "description": "sort by function expression",
+ "expression": "sort_by(people, &to_number(age_str))",
+ "result": [
+ {"age": 10, "age_str": "10", "bool": true, "name": 3},
+ {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"},
+ {"age": 30, "age_str": "30", "bool": true, "name": "c"},
+ {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"},
+ {"age": 50, "age_str": "50", "bool": false, "name": "d"}
+ ]
+ },
+ {
+ "description": "function projection on sort_by function",
+ "expression": "sort_by(people, &age)[].name",
+ "result": [3, "a", "c", "b", "d"]
+ },
+ {
+ "expression": "sort_by(people, &extra)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sort_by(people, &bool)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sort_by(people, &name)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sort_by(people, name)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "sort_by(people, &age)[].extra",
+ "result": ["foo", "bar"]
+ },
+ {
+ "expression": "sort_by(`[]`, &age)",
+ "result": []
+ },
+ {
+ "expression": "max_by(people, &age)",
+ "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"}
+ },
+ {
+ "expression": "max_by(people, &age_str)",
+ "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"}
+ },
+ {
+ "expression": "max_by(people, &bool)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "max_by(people, &extra)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "max_by(people, &to_number(age_str))",
+ "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"}
+ },
+ {
+ "expression": "min_by(people, &age)",
+ "result": {"age": 10, "age_str": "10", "bool": true, "name": 3}
+ },
+ {
+ "expression": "min_by(people, &age_str)",
+ "result": {"age": 10, "age_str": "10", "bool": true, "name": 3}
+ },
+ {
+ "expression": "min_by(people, &bool)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "min_by(people, &extra)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "min_by(people, &to_number(age_str))",
+ "result": {"age": 10, "age_str": "10", "bool": true, "name": 3}
+ }
+ ]
+}, {
+ "given":
+ {
+ "people": [
+ {"age": 10, "order": "1"},
+ {"age": 10, "order": "2"},
+ {"age": 10, "order": "3"},
+ {"age": 10, "order": "4"},
+ {"age": 10, "order": "5"},
+ {"age": 10, "order": "6"},
+ {"age": 10, "order": "7"},
+ {"age": 10, "order": "8"},
+ {"age": 10, "order": "9"},
+ {"age": 10, "order": "10"},
+ {"age": 10, "order": "11"}
+ ]
+ },
+ "cases": [
+ {
+ "description": "stable sort order",
+ "expression": "sort_by(people, &age)",
+ "result": [
+ {"age": 10, "order": "1"},
+ {"age": 10, "order": "2"},
+ {"age": 10, "order": "3"},
+ {"age": 10, "order": "4"},
+ {"age": 10, "order": "5"},
+ {"age": 10, "order": "6"},
+ {"age": 10, "order": "7"},
+ {"age": 10, "order": "8"},
+ {"age": 10, "order": "9"},
+ {"age": 10, "order": "10"},
+ {"age": 10, "order": "11"}
+ ]
+ }
+ ]
+}, {
+ "given":
+ {
+ "people": [
+ {"a": 10, "b": 1, "c": "z"},
+ {"a": 10, "b": 2, "c": null},
+ {"a": 10, "b": 3},
+ {"a": 10, "b": 4, "c": "z"},
+ {"a": 10, "b": 5, "c": null},
+ {"a": 10, "b": 6},
+ {"a": 10, "b": 7, "c": "z"},
+ {"a": 10, "b": 8, "c": null},
+ {"a": 10, "b": 9}
+ ],
+ "empty": []
+ },
+ "cases": [
+ {
+ "expression": "map(&a, people)",
+ "result": [10, 10, 10, 10, 10, 10, 10, 10, 10]
+ },
+ {
+ "expression": "map(&c, people)",
+ "result": ["z", null, null, "z", null, null, "z", null, null]
+ },
+ {
+ "expression": "map(&a, badkey)",
+ "error": "invalid-type"
+ },
+ {
+ "expression": "map(&foo, empty)",
+ "result": []
+ }
+ ]
+}, {
+ "given": {
+ "array": [
+ {
+ "foo": {"bar": "yes1"}
+ },
+ {
+ "foo": {"bar": "yes2"}
+ },
+ {
+ "foo1": {"bar": "no"}
+ }
+ ]},
+ "cases": [
+ {
+ "expression": "map(&foo.bar, array)",
+ "result": ["yes1", "yes2", null]
+ },
+ {
+ "expression": "map(&foo1.bar, array)",
+ "result": [null, null, "no"]
+ },
+ {
+ "expression": "map(&foo.bar.baz, array)",
+ "result": [null, null, null]
+ }
+ ]
+}, {
+ "given": {
+ "array": [[1, 2, 3, [4]], [5, 6, 7, [8, 9]]]
+ },
+ "cases": [
+ {
+ "expression": "map(&[], array)",
+ "result": [[1, 2, 3, 4], [5, 6, 7, 8, 9]]
+ }
+ ]
+}
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json
new file mode 100644
index 0000000..7998a41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json
@@ -0,0 +1,1377 @@
+[
+ {
+ "given": {
+ "__L": true
+ },
+ "cases": [
+ {
+ "expression": "__L",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "!\r": true
+ },
+ "cases": [
+ {
+ "expression": "\"!\\r\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "Y_1623": true
+ },
+ "cases": [
+ {
+ "expression": "Y_1623",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "x": true
+ },
+ "cases": [
+ {
+ "expression": "x",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\tF\uCebb": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\tF\\uCebb\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ " \t": true
+ },
+ "cases": [
+ {
+ "expression": "\" \\t\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ " ": true
+ },
+ "cases": [
+ {
+ "expression": "\" \"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "v2": true
+ },
+ "cases": [
+ {
+ "expression": "v2",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\t": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\t\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_X": true
+ },
+ "cases": [
+ {
+ "expression": "_X",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\t4\ud9da\udd15": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\t4\\ud9da\\udd15\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "v24_W": true
+ },
+ "cases": [
+ {
+ "expression": "v24_W",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "H": true
+ },
+ "cases": [
+ {
+ "expression": "\"H\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\f": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\f\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "E4": true
+ },
+ "cases": [
+ {
+ "expression": "\"E4\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "!": true
+ },
+ "cases": [
+ {
+ "expression": "\"!\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "tM": true
+ },
+ "cases": [
+ {
+ "expression": "tM",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ " [": true
+ },
+ "cases": [
+ {
+ "expression": "\" [\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "R!": true
+ },
+ "cases": [
+ {
+ "expression": "\"R!\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_6W": true
+ },
+ "cases": [
+ {
+ "expression": "_6W",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\uaBA1\r": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\uaBA1\\r\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "tL7": true
+ },
+ "cases": [
+ {
+ "expression": "tL7",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "<": true
+ },
+ "cases": [
+ {
+ "expression": "\">\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "hvu": true
+ },
+ "cases": [
+ {
+ "expression": "hvu",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "; !": true
+ },
+ "cases": [
+ {
+ "expression": "\"; !\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "hU": true
+ },
+ "cases": [
+ {
+ "expression": "hU",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "!I\n\/": true
+ },
+ "cases": [
+ {
+ "expression": "\"!I\\n\\/\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\uEEbF": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\uEEbF\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "U)\t": true
+ },
+ "cases": [
+ {
+ "expression": "\"U)\\t\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "fa0_9": true
+ },
+ "cases": [
+ {
+ "expression": "fa0_9",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "/": true
+ },
+ "cases": [
+ {
+ "expression": "\"/\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "Gy": true
+ },
+ "cases": [
+ {
+ "expression": "Gy",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\b": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\b\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "<": true
+ },
+ "cases": [
+ {
+ "expression": "\"<\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\t": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\t\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\t&\\\r": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\t&\\\\\\r\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "#": true
+ },
+ "cases": [
+ {
+ "expression": "\"#\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "B__": true
+ },
+ "cases": [
+ {
+ "expression": "B__",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\nS \n": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\nS \\n\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "Bp": true
+ },
+ "cases": [
+ {
+ "expression": "Bp",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ ",\t;": true
+ },
+ "cases": [
+ {
+ "expression": "\",\\t;\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "B_q": true
+ },
+ "cases": [
+ {
+ "expression": "B_q",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\/+\t\n\b!Z": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\/+\\t\\n\\b!Z\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\udadd\udfc7\\ueFAc": true
+ },
+ "cases": [
+ {
+ "expression": "\"\udadd\udfc7\\\\ueFAc\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ ":\f": true
+ },
+ "cases": [
+ {
+ "expression": "\":\\f\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\/": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\/\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_BW_6Hg_Gl": true
+ },
+ "cases": [
+ {
+ "expression": "_BW_6Hg_Gl",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\udbcf\udc02": true
+ },
+ "cases": [
+ {
+ "expression": "\"\udbcf\udc02\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "zs1DC": true
+ },
+ "cases": [
+ {
+ "expression": "zs1DC",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "__434": true
+ },
+ "cases": [
+ {
+ "expression": "__434",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\udb94\udd41": true
+ },
+ "cases": [
+ {
+ "expression": "\"\udb94\udd41\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "Z_5": true
+ },
+ "cases": [
+ {
+ "expression": "Z_5",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "z_M_": true
+ },
+ "cases": [
+ {
+ "expression": "z_M_",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "YU_2": true
+ },
+ "cases": [
+ {
+ "expression": "YU_2",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_0": true
+ },
+ "cases": [
+ {
+ "expression": "_0",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\b+": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\b+\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\"": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\\"\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "D7": true
+ },
+ "cases": [
+ {
+ "expression": "D7",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_62L": true
+ },
+ "cases": [
+ {
+ "expression": "_62L",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\tK\t": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\tK\\t\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\n\\\f": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\n\\\\\\f\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "I_": true
+ },
+ "cases": [
+ {
+ "expression": "I_",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "W_a0_": true
+ },
+ "cases": [
+ {
+ "expression": "W_a0_",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "BQ": true
+ },
+ "cases": [
+ {
+ "expression": "BQ",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\tX$\uABBb": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\tX$\\uABBb\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "Z9": true
+ },
+ "cases": [
+ {
+ "expression": "Z9",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\b%\"\uda38\udd0f": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\b%\\\"\uda38\udd0f\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_F": true
+ },
+ "cases": [
+ {
+ "expression": "_F",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "!,": true
+ },
+ "cases": [
+ {
+ "expression": "\"!,\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\"!": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\\"!\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "Hh": true
+ },
+ "cases": [
+ {
+ "expression": "Hh",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "&": true
+ },
+ "cases": [
+ {
+ "expression": "\"&\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "9\r\\R": true
+ },
+ "cases": [
+ {
+ "expression": "\"9\\r\\\\R\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "M_k": true
+ },
+ "cases": [
+ {
+ "expression": "M_k",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "!\b\n\udb06\ude52\"\"": true
+ },
+ "cases": [
+ {
+ "expression": "\"!\\b\\n\udb06\ude52\\\"\\\"\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "6": true
+ },
+ "cases": [
+ {
+ "expression": "\"6\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_7": true
+ },
+ "cases": [
+ {
+ "expression": "_7",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "0": true
+ },
+ "cases": [
+ {
+ "expression": "\"0\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\\8\\": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\\\8\\\\\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "b7eo": true
+ },
+ "cases": [
+ {
+ "expression": "b7eo",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "xIUo9": true
+ },
+ "cases": [
+ {
+ "expression": "xIUo9",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "5": true
+ },
+ "cases": [
+ {
+ "expression": "\"5\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "?": true
+ },
+ "cases": [
+ {
+ "expression": "\"?\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "sU": true
+ },
+ "cases": [
+ {
+ "expression": "sU",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "VH2&H\\\/": true
+ },
+ "cases": [
+ {
+ "expression": "\"VH2&H\\\\\\/\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_C": true
+ },
+ "cases": [
+ {
+ "expression": "_C",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "_": true
+ },
+ "cases": [
+ {
+ "expression": "_",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "<\t": true
+ },
+ "cases": [
+ {
+ "expression": "\"<\\t\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {
+ "\uD834\uDD1E": true
+ },
+ "cases": [
+ {
+ "expression": "\"\\uD834\\uDD1E\"",
+ "result": true
+ }
+ ]
+ }
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json
new file mode 100644
index 0000000..aa03b35
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json
@@ -0,0 +1,346 @@
+[{
+ "given":
+ {"foo": {"bar": ["zero", "one", "two"]}},
+ "cases": [
+ {
+ "expression": "foo.bar[0]",
+ "result": "zero"
+ },
+ {
+ "expression": "foo.bar[1]",
+ "result": "one"
+ },
+ {
+ "expression": "foo.bar[2]",
+ "result": "two"
+ },
+ {
+ "expression": "foo.bar[3]",
+ "result": null
+ },
+ {
+ "expression": "foo.bar[-1]",
+ "result": "two"
+ },
+ {
+ "expression": "foo.bar[-2]",
+ "result": "one"
+ },
+ {
+ "expression": "foo.bar[-3]",
+ "result": "zero"
+ },
+ {
+ "expression": "foo.bar[-4]",
+ "result": null
+ }
+ ]
+},
+{
+ "given":
+ {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]},
+ "cases": [
+ {
+ "expression": "foo.bar",
+ "result": null
+ },
+ {
+ "expression": "foo[0].bar",
+ "result": "one"
+ },
+ {
+ "expression": "foo[1].bar",
+ "result": "two"
+ },
+ {
+ "expression": "foo[2].bar",
+ "result": "three"
+ },
+ {
+ "expression": "foo[3].notbar",
+ "result": "four"
+ },
+ {
+ "expression": "foo[3].bar",
+ "result": null
+ },
+ {
+ "expression": "foo[0]",
+ "result": {"bar": "one"}
+ },
+ {
+ "expression": "foo[1]",
+ "result": {"bar": "two"}
+ },
+ {
+ "expression": "foo[2]",
+ "result": {"bar": "three"}
+ },
+ {
+ "expression": "foo[3]",
+ "result": {"notbar": "four"}
+ },
+ {
+ "expression": "foo[4]",
+ "result": null
+ }
+ ]
+},
+{
+ "given": [
+ "one", "two", "three"
+ ],
+ "cases": [
+ {
+ "expression": "[0]",
+ "result": "one"
+ },
+ {
+ "expression": "[1]",
+ "result": "two"
+ },
+ {
+ "expression": "[2]",
+ "result": "three"
+ },
+ {
+ "expression": "[-1]",
+ "result": "three"
+ },
+ {
+ "expression": "[-2]",
+ "result": "two"
+ },
+ {
+ "expression": "[-3]",
+ "result": "one"
+ }
+ ]
+},
+{
+ "given": {"reservations": [
+ {"instances": [{"foo": 1}, {"foo": 2}]}
+ ]},
+ "cases": [
+ {
+ "expression": "reservations[].instances[].foo",
+ "result": [1, 2]
+ },
+ {
+ "expression": "reservations[].instances[].bar",
+ "result": []
+ },
+ {
+ "expression": "reservations[].notinstances[].foo",
+ "result": []
+ },
+ {
+ "expression": "reservations[].notinstances[].foo",
+ "result": []
+ }
+ ]
+},
+{
+ "given": {"reservations": [{
+ "instances": [
+ {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]},
+ {"foo": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]},
+ {"foo": "bar"},
+ {"notfoo": [{"bar": 20}, {"bar": 21}, {"notbar": [7]}, {"bar": 22}]},
+ {"bar": [{"baz": [1]}, {"baz": [2]}, {"baz": [3]}, {"baz": [4]}]},
+ {"baz": [{"baz": [1, 2]}, {"baz": []}, {"baz": []}, {"baz": [3, 4]}]},
+ {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]}
+ ],
+ "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}
+ }, {
+ "instances": [
+ {"a": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]},
+ {"b": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]},
+ {"c": "bar"},
+ {"notfoo": [{"bar": 23}, {"bar": 24}, {"notbar": [7]}, {"bar": 25}]},
+ {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]}
+ ],
+ "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}
+ }
+ ]},
+ "cases": [
+ {
+ "expression": "reservations[].instances[].foo[].bar",
+ "result": [1, 2, 4, 5, 6, 8]
+ },
+ {
+ "expression": "reservations[].instances[].foo[].baz",
+ "result": []
+ },
+ {
+ "expression": "reservations[].instances[].notfoo[].bar",
+ "result": [20, 21, 22, 23, 24, 25]
+ },
+ {
+ "expression": "reservations[].instances[].notfoo[].notbar",
+ "result": [[7], [7]]
+ },
+ {
+ "expression": "reservations[].notinstances[].foo",
+ "result": []
+ },
+ {
+ "expression": "reservations[].instances[].foo[].notbar",
+ "result": [3, [7]]
+ },
+ {
+ "expression": "reservations[].instances[].bar[].baz",
+ "result": [[1], [2], [3], [4]]
+ },
+ {
+ "expression": "reservations[].instances[].baz[].baz",
+ "result": [[1, 2], [], [], [3, 4]]
+ },
+ {
+ "expression": "reservations[].instances[].qux[].baz",
+ "result": [[], [1, 2, 3], [4], [], [], [1, 2, 3], [4], []]
+ },
+ {
+ "expression": "reservations[].instances[].qux[].baz[]",
+ "result": [1, 2, 3, 4, 1, 2, 3, 4]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [
+ [["one", "two"], ["three", "four"]],
+ [["five", "six"], ["seven", "eight"]],
+ [["nine"], ["ten"]]
+ ]
+ },
+ "cases": [
+ {
+ "expression": "foo[]",
+ "result": [["one", "two"], ["three", "four"], ["five", "six"],
+ ["seven", "eight"], ["nine"], ["ten"]]
+ },
+ {
+ "expression": "foo[][0]",
+ "result": ["one", "three", "five", "seven", "nine", "ten"]
+ },
+ {
+ "expression": "foo[][1]",
+ "result": ["two", "four", "six", "eight"]
+ },
+ {
+ "expression": "foo[][0][0]",
+ "result": []
+ },
+ {
+ "expression": "foo[][2][2]",
+ "result": []
+ },
+ {
+ "expression": "foo[][0][0][100]",
+ "result": []
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [{
+ "bar": [
+ {
+ "qux": 2,
+ "baz": 1
+ },
+ {
+ "qux": 4,
+ "baz": 3
+ }
+ ]
+ },
+ {
+ "bar": [
+ {
+ "qux": 6,
+ "baz": 5
+ },
+ {
+ "qux": 8,
+ "baz": 7
+ }
+ ]
+ }
+ ]
+ },
+ "cases": [
+ {
+ "expression": "foo",
+ "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]},
+ {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}]
+ },
+ {
+ "expression": "foo[]",
+ "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]},
+ {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}]
+ },
+ {
+ "expression": "foo[].bar",
+ "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}],
+ [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]]
+ },
+ {
+ "expression": "foo[].bar[]",
+ "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3},
+ {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]
+ },
+ {
+ "expression": "foo[].bar[].baz",
+ "result": [1, 3, 5, 7]
+ }
+ ]
+},
+{
+ "given": {
+ "string": "string",
+ "hash": {"foo": "bar", "bar": "baz"},
+ "number": 23,
+ "nullvalue": null
+ },
+ "cases": [
+ {
+ "expression": "string[]",
+ "result": null
+ },
+ {
+ "expression": "hash[]",
+ "result": null
+ },
+ {
+ "expression": "number[]",
+ "result": null
+ },
+ {
+ "expression": "nullvalue[]",
+ "result": null
+ },
+ {
+ "expression": "string[].foo",
+ "result": null
+ },
+ {
+ "expression": "hash[].foo",
+ "result": null
+ },
+ {
+ "expression": "number[].foo",
+ "result": null
+ },
+ {
+ "expression": "nullvalue[].foo",
+ "result": null
+ },
+ {
+ "expression": "nullvalue[].foo[].bar",
+ "result": null
+ }
+ ]
+}
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json
new file mode 100644
index 0000000..c6706b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json
@@ -0,0 +1,185 @@
+[
+ {
+ "given": {
+ "foo": [{"name": "a"}, {"name": "b"}],
+ "bar": {"baz": "qux"}
+ },
+ "cases": [
+ {
+ "expression": "`\"foo\"`",
+ "result": "foo"
+ },
+ {
+ "comment": "Interpret escaped unicode.",
+ "expression": "`\"\\u03a6\"`",
+ "result": "Φ"
+ },
+ {
+ "expression": "`\"✓\"`",
+ "result": "✓"
+ },
+ {
+ "expression": "`[1, 2, 3]`",
+ "result": [1, 2, 3]
+ },
+ {
+ "expression": "`{\"a\": \"b\"}`",
+ "result": {"a": "b"}
+ },
+ {
+ "expression": "`true`",
+ "result": true
+ },
+ {
+ "expression": "`false`",
+ "result": false
+ },
+ {
+ "expression": "`null`",
+ "result": null
+ },
+ {
+ "expression": "`0`",
+ "result": 0
+ },
+ {
+ "expression": "`1`",
+ "result": 1
+ },
+ {
+ "expression": "`2`",
+ "result": 2
+ },
+ {
+ "expression": "`3`",
+ "result": 3
+ },
+ {
+ "expression": "`4`",
+ "result": 4
+ },
+ {
+ "expression": "`5`",
+ "result": 5
+ },
+ {
+ "expression": "`6`",
+ "result": 6
+ },
+ {
+ "expression": "`7`",
+ "result": 7
+ },
+ {
+ "expression": "`8`",
+ "result": 8
+ },
+ {
+ "expression": "`9`",
+ "result": 9
+ },
+ {
+ "comment": "Escaping a backtick in quotes",
+ "expression": "`\"foo\\`bar\"`",
+ "result": "foo`bar"
+ },
+ {
+ "comment": "Double quote in literal",
+ "expression": "`\"foo\\\"bar\"`",
+ "result": "foo\"bar"
+ },
+ {
+ "expression": "`\"1\\`\"`",
+ "result": "1`"
+ },
+ {
+ "comment": "Multiple literal expressions with escapes",
+ "expression": "`\"\\\\\"`.{a:`\"b\"`}",
+ "result": {"a": "b"}
+ },
+ {
+ "comment": "literal . identifier",
+ "expression": "`{\"a\": \"b\"}`.a",
+ "result": "b"
+ },
+ {
+ "comment": "literal . identifier . identifier",
+ "expression": "`{\"a\": {\"b\": \"c\"}}`.a.b",
+ "result": "c"
+ },
+ {
+ "comment": "literal . identifier bracket-expr",
+ "expression": "`[0, 1, 2]`[1]",
+ "result": 1
+ }
+ ]
+ },
+ {
+ "comment": "Literals",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "comment": "Literal with leading whitespace",
+ "expression": "` {\"foo\": true}`",
+ "result": {"foo": true}
+ },
+ {
+ "comment": "Literal with trailing whitespace",
+ "expression": "`{\"foo\": true} `",
+ "result": {"foo": true}
+ },
+ {
+ "comment": "Literal on RHS of subexpr not allowed",
+ "expression": "foo.`\"bar\"`",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Raw String Literals",
+ "given": {},
+ "cases": [
+ {
+ "expression": "'foo'",
+ "result": "foo"
+ },
+ {
+ "expression": "' foo '",
+ "result": " foo "
+ },
+ {
+ "expression": "'0'",
+ "result": "0"
+ },
+ {
+ "expression": "'newline\n'",
+ "result": "newline\n"
+ },
+ {
+ "expression": "'\n'",
+ "result": "\n"
+ },
+ {
+ "expression": "'✓'",
+ "result": "✓"
+ },
+ {
+ "expression": "'𝄞'",
+ "result": "𝄞"
+ },
+ {
+ "expression": "' [foo] '",
+ "result": " [foo] "
+ },
+ {
+ "expression": "'[foo]'",
+ "result": "[foo]"
+ },
+ {
+ "comment": "Do not interpret escaped unicode.",
+ "expression": "'\\u03a6'",
+ "result": "\\u03a6"
+ }
+ ]
+ }
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json
new file mode 100644
index 0000000..8f2a481
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json
@@ -0,0 +1,393 @@
+[{
+ "given": {
+ "foo": {
+ "bar": "bar",
+ "baz": "baz",
+ "qux": "qux",
+ "nested": {
+ "one": {
+ "a": "first",
+ "b": "second",
+ "c": "third"
+ },
+ "two": {
+ "a": "first",
+ "b": "second",
+ "c": "third"
+ },
+ "three": {
+ "a": "first",
+ "b": "second",
+ "c": {"inner": "third"}
+ }
+ }
+ },
+ "bar": 1,
+ "baz": 2,
+ "qux\"": 3
+ },
+ "cases": [
+ {
+ "expression": "foo.{bar: bar}",
+ "result": {"bar": "bar"}
+ },
+ {
+ "expression": "foo.{\"bar\": bar}",
+ "result": {"bar": "bar"}
+ },
+ {
+ "expression": "foo.{\"foo.bar\": bar}",
+ "result": {"foo.bar": "bar"}
+ },
+ {
+ "expression": "foo.{bar: bar, baz: baz}",
+ "result": {"bar": "bar", "baz": "baz"}
+ },
+ {
+ "expression": "foo.{\"bar\": bar, \"baz\": baz}",
+ "result": {"bar": "bar", "baz": "baz"}
+ },
+ {
+ "expression": "{\"baz\": baz, \"qux\\\"\": \"qux\\\"\"}",
+ "result": {"baz": 2, "qux\"": 3}
+ },
+ {
+ "expression": "foo.{bar:bar,baz:baz}",
+ "result": {"bar": "bar", "baz": "baz"}
+ },
+ {
+ "expression": "foo.{bar: bar,qux: qux}",
+ "result": {"bar": "bar", "qux": "qux"}
+ },
+ {
+ "expression": "foo.{bar: bar, noexist: noexist}",
+ "result": {"bar": "bar", "noexist": null}
+ },
+ {
+ "expression": "foo.{noexist: noexist, alsonoexist: alsonoexist}",
+ "result": {"noexist": null, "alsonoexist": null}
+ },
+ {
+ "expression": "foo.badkey.{nokey: nokey, alsonokey: alsonokey}",
+ "result": null
+ },
+ {
+ "expression": "foo.nested.*.{a: a,b: b}",
+ "result": [{"a": "first", "b": "second"},
+ {"a": "first", "b": "second"},
+ {"a": "first", "b": "second"}]
+ },
+ {
+ "expression": "foo.nested.three.{a: a, cinner: c.inner}",
+ "result": {"a": "first", "cinner": "third"}
+ },
+ {
+ "expression": "foo.nested.three.{a: a, c: c.inner.bad.key}",
+ "result": {"a": "first", "c": null}
+ },
+ {
+ "expression": "foo.{a: nested.one.a, b: nested.two.b}",
+ "result": {"a": "first", "b": "second"}
+ },
+ {
+ "expression": "{bar: bar, baz: baz}",
+ "result": {"bar": 1, "baz": 2}
+ },
+ {
+ "expression": "{bar: bar}",
+ "result": {"bar": 1}
+ },
+ {
+ "expression": "{otherkey: bar}",
+ "result": {"otherkey": 1}
+ },
+ {
+ "expression": "{no: no, exist: exist}",
+ "result": {"no": null, "exist": null}
+ },
+ {
+ "expression": "foo.[bar]",
+ "result": ["bar"]
+ },
+ {
+ "expression": "foo.[bar,baz]",
+ "result": ["bar", "baz"]
+ },
+ {
+ "expression": "foo.[bar,qux]",
+ "result": ["bar", "qux"]
+ },
+ {
+ "expression": "foo.[bar,noexist]",
+ "result": ["bar", null]
+ },
+ {
+ "expression": "foo.[noexist,alsonoexist]",
+ "result": [null, null]
+ }
+ ]
+}, {
+ "given": {
+ "foo": {"bar": 1, "baz": [2, 3, 4]}
+ },
+ "cases": [
+ {
+ "expression": "foo.{bar:bar,baz:baz}",
+ "result": {"bar": 1, "baz": [2, 3, 4]}
+ },
+ {
+ "expression": "foo.[bar,baz[0]]",
+ "result": [1, 2]
+ },
+ {
+ "expression": "foo.[bar,baz[1]]",
+ "result": [1, 3]
+ },
+ {
+ "expression": "foo.[bar,baz[2]]",
+ "result": [1, 4]
+ },
+ {
+ "expression": "foo.[bar,baz[3]]",
+ "result": [1, null]
+ },
+ {
+ "expression": "foo.[bar[0],baz[3]]",
+ "result": [null, null]
+ }
+ ]
+}, {
+ "given": {
+ "foo": {"bar": 1, "baz": 2}
+ },
+ "cases": [
+ {
+ "expression": "foo.{bar: bar, baz: baz}",
+ "result": {"bar": 1, "baz": 2}
+ },
+ {
+ "expression": "foo.[bar,baz]",
+ "result": [1, 2]
+ }
+ ]
+}, {
+ "given": {
+ "foo": {
+ "bar": {"baz": [{"common": "first", "one": 1},
+ {"common": "second", "two": 2}]},
+ "ignoreme": 1,
+ "includeme": true
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.{bar: bar.baz[1],includeme: includeme}",
+ "result": {"bar": {"common": "second", "two": 2}, "includeme": true}
+ },
+ {
+ "expression": "foo.{\"bar.baz.two\": bar.baz[1].two, includeme: includeme}",
+ "result": {"bar.baz.two": 2, "includeme": true}
+ },
+ {
+ "expression": "foo.[includeme, bar.baz[*].common]",
+ "result": [true, ["first", "second"]]
+ },
+ {
+ "expression": "foo.[includeme, bar.baz[*].none]",
+ "result": [true, []]
+ },
+ {
+ "expression": "foo.[includeme, bar.baz[].common]",
+ "result": [true, ["first", "second"]]
+ }
+ ]
+}, {
+ "given": {
+ "reservations": [{
+ "instances": [
+ {"id": "id1",
+ "name": "first"},
+ {"id": "id2",
+ "name": "second"}
+ ]}, {
+ "instances": [
+ {"id": "id3",
+ "name": "third"},
+ {"id": "id4",
+ "name": "fourth"}
+ ]}
+ ]},
+ "cases": [
+ {
+ "expression": "reservations[*].instances[*].{id: id, name: name}",
+ "result": [[{"id": "id1", "name": "first"}, {"id": "id2", "name": "second"}],
+ [{"id": "id3", "name": "third"}, {"id": "id4", "name": "fourth"}]]
+ },
+ {
+ "expression": "reservations[].instances[].{id: id, name: name}",
+ "result": [{"id": "id1", "name": "first"},
+ {"id": "id2", "name": "second"},
+ {"id": "id3", "name": "third"},
+ {"id": "id4", "name": "fourth"}]
+ },
+ {
+ "expression": "reservations[].instances[].[id, name]",
+ "result": [["id1", "first"],
+ ["id2", "second"],
+ ["id3", "third"],
+ ["id4", "fourth"]]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [{
+ "bar": [
+ {
+ "qux": 2,
+ "baz": 1
+ },
+ {
+ "qux": 4,
+ "baz": 3
+ }
+ ]
+ },
+ {
+ "bar": [
+ {
+ "qux": 6,
+ "baz": 5
+ },
+ {
+ "qux": 8,
+ "baz": 7
+ }
+ ]
+ }
+ ]
+ },
+ "cases": [
+ {
+ "expression": "foo",
+ "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]},
+ {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}]
+ },
+ {
+ "expression": "foo[]",
+ "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]},
+ {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}]
+ },
+ {
+ "expression": "foo[].bar",
+ "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}],
+ [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]]
+ },
+ {
+ "expression": "foo[].bar[]",
+ "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3},
+ {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]
+ },
+ {
+ "expression": "foo[].bar[].[baz, qux]",
+ "result": [[1, 2], [3, 4], [5, 6], [7, 8]]
+ },
+ {
+ "expression": "foo[].bar[].[baz]",
+ "result": [[1], [3], [5], [7]]
+ },
+ {
+ "expression": "foo[].bar[].[baz, qux][]",
+ "result": [1, 2, 3, 4, 5, 6, 7, 8]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": {
+ "baz": [
+ {
+ "bar": "abc"
+ }, {
+ "bar": "def"
+ }
+ ],
+ "qux": ["zero"]
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.[baz[*].bar, qux[0]]",
+ "result": [["abc", "def"], "zero"]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": {
+ "baz": [
+ {
+ "bar": "a",
+ "bam": "b",
+ "boo": "c"
+ }, {
+ "bar": "d",
+ "bam": "e",
+ "boo": "f"
+ }
+ ],
+ "qux": ["zero"]
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.[baz[*].[bar, boo], qux[0]]",
+ "result": [[["a", "c" ], ["d", "f" ]], "zero"]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": {
+ "baz": [
+ {
+ "bar": "a",
+ "bam": "b",
+ "boo": "c"
+ }, {
+ "bar": "d",
+ "bam": "e",
+ "boo": "f"
+ }
+ ],
+ "qux": ["zero"]
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.[baz[*].not_there || baz[*].bar, qux[0]]",
+ "result": [["a", "d"], "zero"]
+ }
+ ]
+},
+{
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "comment": "Nested multiselect",
+ "expression": "[[*],*]",
+ "result": [null, ["object"]]
+ }
+ ]
+},
+{
+ "given": [],
+ "cases": [
+ {
+ "comment": "Nested multiselect",
+ "expression": "[[*]]",
+ "result": [[]]
+ }
+ ]
+}
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json
new file mode 100644
index 0000000..2127cf4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json
@@ -0,0 +1,59 @@
+[{
+ "given":
+ {"outer": {"foo": "foo", "bar": "bar", "baz": "baz"}},
+ "cases": [
+ {
+ "expression": "outer.foo || outer.bar",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.foo||outer.bar",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.bar || outer.baz",
+ "result": "bar"
+ },
+ {
+ "expression": "outer.bar||outer.baz",
+ "result": "bar"
+ },
+ {
+ "expression": "outer.bad || outer.foo",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.bad||outer.foo",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.foo || outer.bad",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.foo||outer.bad",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.bad || outer.alsobad",
+ "result": null
+ },
+ {
+ "expression": "outer.bad||outer.alsobad",
+ "result": null
+ }
+ ]
+}, {
+ "given":
+ {"outer": {"foo": "foo", "bool": false, "empty_list": [], "empty_string": ""}},
+ "cases": [
+ {
+ "expression": "outer.empty_string || outer.foo",
+ "result": "foo"
+ },
+ {
+ "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo",
+ "result": "foo"
+ }
+ ]
+}]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json
new file mode 100644
index 0000000..b10c0a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json
@@ -0,0 +1,131 @@
+[{
+ "given": {
+ "foo": {
+ "bar": {
+ "baz": "subkey"
+ },
+ "other": {
+ "baz": "subkey"
+ },
+ "other2": {
+ "baz": "subkey"
+ },
+ "other3": {
+ "notbaz": ["a", "b", "c"]
+ },
+ "other4": {
+ "notbaz": ["a", "b", "c"]
+ }
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.*.baz | [0]",
+ "result": "subkey"
+ },
+ {
+ "expression": "foo.*.baz | [1]",
+ "result": "subkey"
+ },
+ {
+ "expression": "foo.*.baz | [2]",
+ "result": "subkey"
+ },
+ {
+ "expression": "foo.bar.* | [0]",
+ "result": "subkey"
+ },
+ {
+ "expression": "foo.*.notbaz | [*]",
+ "result": [["a", "b", "c"], ["a", "b", "c"]]
+ },
+ {
+ "expression": "{\"a\": foo.bar, \"b\": foo.other} | *.baz",
+ "result": ["subkey", "subkey"]
+ }
+ ]
+}, {
+ "given": {
+ "foo": {
+ "bar": {
+ "baz": "one"
+ },
+ "other": {
+ "baz": "two"
+ },
+ "other2": {
+ "baz": "three"
+ },
+ "other3": {
+ "notbaz": ["a", "b", "c"]
+ },
+ "other4": {
+ "notbaz": ["d", "e", "f"]
+ }
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo | bar",
+ "result": {"baz": "one"}
+ },
+ {
+ "expression": "foo | bar | baz",
+ "result": "one"
+ },
+ {
+ "expression": "foo|bar| baz",
+ "result": "one"
+ },
+ {
+ "expression": "not_there | [0]",
+ "result": null
+ },
+ {
+ "expression": "not_there | [0]",
+ "result": null
+ },
+ {
+ "expression": "[foo.bar, foo.other] | [0]",
+ "result": {"baz": "one"}
+ },
+ {
+ "expression": "{\"a\": foo.bar, \"b\": foo.other} | a",
+ "result": {"baz": "one"}
+ },
+ {
+ "expression": "{\"a\": foo.bar, \"b\": foo.other} | b",
+ "result": {"baz": "two"}
+ },
+ {
+ "expression": "foo.bam || foo.bar | baz",
+ "result": "one"
+ },
+ {
+ "expression": "foo | not_there || bar",
+ "result": {"baz": "one"}
+ }
+ ]
+}, {
+ "given": {
+ "foo": [{
+ "bar": [{
+ "baz": "one"
+ }, {
+ "baz": "two"
+ }]
+ }, {
+ "bar": [{
+ "baz": "three"
+ }, {
+ "baz": "four"
+ }]
+ }]
+ },
+ "cases": [
+ {
+ "expression": "foo[*].bar[*] | [0][0]",
+ "result": {"baz": "one"}
+ }
+ ]
+}]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json
new file mode 100644
index 0000000..3594772
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json
@@ -0,0 +1,187 @@
+[{
+ "given": {
+ "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+ "bar": {
+ "baz": 1
+ }
+ },
+ "cases": [
+ {
+ "expression": "bar[0:10]",
+ "result": null
+ },
+ {
+ "expression": "foo[0:10:1]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[0:10]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[0:10:]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[0::1]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[0::]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[0:]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[:10:1]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[::1]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[:10:]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[::]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[:]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[1:9]",
+ "result": [1, 2, 3, 4, 5, 6, 7, 8]
+ },
+ {
+ "expression": "foo[0:10:2]",
+ "result": [0, 2, 4, 6, 8]
+ },
+ {
+ "expression": "foo[5:]",
+ "result": [5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[5::2]",
+ "result": [5, 7, 9]
+ },
+ {
+ "expression": "foo[::2]",
+ "result": [0, 2, 4, 6, 8]
+ },
+ {
+ "expression": "foo[::-1]",
+ "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+ },
+ {
+ "expression": "foo[1::2]",
+ "result": [1, 3, 5, 7, 9]
+ },
+ {
+ "expression": "foo[10:0:-1]",
+ "result": [9, 8, 7, 6, 5, 4, 3, 2, 1]
+ },
+ {
+ "expression": "foo[10:5:-1]",
+ "result": [9, 8, 7, 6]
+ },
+ {
+ "expression": "foo[8:2:-2]",
+ "result": [8, 6, 4]
+ },
+ {
+ "expression": "foo[0:20]",
+ "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ },
+ {
+ "expression": "foo[10:-20:-1]",
+ "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+ },
+ {
+ "expression": "foo[10:-20]",
+ "result": []
+ },
+ {
+ "expression": "foo[-4:-1]",
+ "result": [6, 7, 8]
+ },
+ {
+ "expression": "foo[:-5:-1]",
+ "result": [9, 8, 7, 6]
+ },
+ {
+ "expression": "foo[8:2:0]",
+ "error": "invalid-value"
+ },
+ {
+ "expression": "foo[8:2:0:1]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[8:2&]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[2:a:3]",
+ "error": "syntax"
+ }
+ ]
+}, {
+ "given": {
+ "foo": [{"a": 1}, {"a": 2}, {"a": 3}],
+ "bar": [{"a": {"b": 1}}, {"a": {"b": 2}},
+ {"a": {"b": 3}}],
+ "baz": 50
+ },
+ "cases": [
+ {
+ "expression": "foo[:2].a",
+ "result": [1, 2]
+ },
+ {
+ "expression": "foo[:2].b",
+ "result": []
+ },
+ {
+ "expression": "foo[:2].a.b",
+ "result": []
+ },
+ {
+ "expression": "bar[::-1].a.b",
+ "result": [3, 2, 1]
+ },
+ {
+ "expression": "bar[:2].a.b",
+ "result": [1, 2]
+ },
+ {
+ "expression": "baz[:2].a",
+ "result": null
+ }
+ ]
+}, {
+ "given": [{"a": 1}, {"a": 2}, {"a": 3}],
+ "cases": [
+ {
+ "expression": "[:]",
+ "result": [{"a": 1}, {"a": 2}, {"a": 3}]
+ },
+ {
+ "expression": "[:2].a",
+ "result": [1, 2]
+ },
+ {
+ "expression": "[::-1].a",
+ "result": [3, 2, 1]
+ },
+ {
+ "expression": "[:2].b",
+ "result": []
+ }
+ ]
+}]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json
new file mode 100644
index 0000000..003c294
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json
@@ -0,0 +1,616 @@
+[{
+ "comment": "Dot syntax",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "foo.bar",
+ "result": null
+ },
+ {
+ "expression": "foo.1",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo.-11",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo",
+ "result": null
+ },
+ {
+ "expression": "foo.",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo.",
+ "error": "syntax"
+ },
+ {
+ "expression": ".foo",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo..bar",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo.bar.",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[.]",
+ "error": "syntax"
+ }
+ ]
+},
+ {
+ "comment": "Simple token errors",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": ".",
+ "error": "syntax"
+ },
+ {
+ "expression": ":",
+ "error": "syntax"
+ },
+ {
+ "expression": ",",
+ "error": "syntax"
+ },
+ {
+ "expression": "]",
+ "error": "syntax"
+ },
+ {
+ "expression": "[",
+ "error": "syntax"
+ },
+ {
+ "expression": "}",
+ "error": "syntax"
+ },
+ {
+ "expression": "{",
+ "error": "syntax"
+ },
+ {
+ "expression": ")",
+ "error": "syntax"
+ },
+ {
+ "expression": "(",
+ "error": "syntax"
+ },
+ {
+ "expression": "((&",
+ "error": "syntax"
+ },
+ {
+ "expression": "a[",
+ "error": "syntax"
+ },
+ {
+ "expression": "a]",
+ "error": "syntax"
+ },
+ {
+ "expression": "a][",
+ "error": "syntax"
+ },
+ {
+ "expression": "!",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Boolean syntax errors",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "![!(!",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Wildcard syntax",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "*",
+ "result": ["object"]
+ },
+ {
+ "expression": "*.*",
+ "result": []
+ },
+ {
+ "expression": "*.foo",
+ "result": []
+ },
+ {
+ "expression": "*[0]",
+ "result": []
+ },
+ {
+ "expression": ".*",
+ "error": "syntax"
+ },
+ {
+ "expression": "*foo",
+ "error": "syntax"
+ },
+ {
+ "expression": "*0",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[*]bar",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[*]*",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Flatten syntax",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "[]",
+ "result": null
+ }
+ ]
+ },
+ {
+ "comment": "Simple bracket syntax",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "[0]",
+ "result": null
+ },
+ {
+ "expression": "[*]",
+ "result": null
+ },
+ {
+ "expression": "*.[0]",
+ "error": "syntax"
+ },
+ {
+ "expression": "*.[\"0\"]",
+ "result": [[null]]
+ },
+ {
+ "expression": "[*].bar",
+ "result": null
+ },
+ {
+ "expression": "[*][0]",
+ "result": null
+ },
+ {
+ "expression": "foo[#]",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Multi-select list syntax",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "foo[0]",
+ "result": null
+ },
+ {
+ "comment": "Valid multi-select of a list",
+ "expression": "foo[0, 1]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo.[0]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo.[*]",
+ "result": null
+ },
+ {
+ "comment": "Multi-select of a list with trailing comma",
+ "expression": "foo[0, ]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a list with trailing comma and no close",
+ "expression": "foo[0,",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a list with trailing comma and no close",
+ "expression": "foo.[a",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a list with extra comma",
+ "expression": "foo[0,, 1]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a list using an identifier index",
+ "expression": "foo[abc]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a list using identifier indices",
+ "expression": "foo[abc, def]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a list using an identifier index",
+ "expression": "foo[abc, 1]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a list using an identifier index with trailing comma",
+ "expression": "foo[abc, ]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Valid multi-select of a hash using an identifier index",
+ "expression": "foo.[abc]",
+ "result": null
+ },
+ {
+ "comment": "Valid multi-select of a hash",
+ "expression": "foo.[abc, def]",
+ "result": null
+ },
+ {
+ "comment": "Multi-select of a hash using a numeric index",
+ "expression": "foo.[abc, 1]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a hash with a trailing comma",
+ "expression": "foo.[abc, ]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a hash with extra commas",
+ "expression": "foo.[abc,, def]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Multi-select of a hash using number indices",
+ "expression": "foo.[0, 1]",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Multi-select hash syntax",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "comment": "No key or value",
+ "expression": "a{}",
+ "error": "syntax"
+ },
+ {
+ "comment": "No closing token",
+ "expression": "a{",
+ "error": "syntax"
+ },
+ {
+ "comment": "Not a key value pair",
+ "expression": "a{foo}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing value and closing character",
+ "expression": "a{foo:",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing closing character",
+ "expression": "a{foo: 0",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing value",
+ "expression": "a{foo:}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Trailing comma and no closing character",
+ "expression": "a{foo: 0, ",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing value with trailing comma",
+ "expression": "a{foo: ,}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Accessing Array using an identifier",
+ "expression": "a{foo: bar}",
+ "error": "syntax"
+ },
+ {
+ "expression": "a{foo: 0}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing key-value pair",
+ "expression": "a.{}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Not a key-value pair",
+ "expression": "a.{foo}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing value",
+ "expression": "a.{foo:}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing value with trailing comma",
+ "expression": "a.{foo: ,}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Valid multi-select hash extraction",
+ "expression": "a.{foo: bar}",
+ "result": null
+ },
+ {
+ "comment": "Valid multi-select hash extraction",
+ "expression": "a.{foo: bar, baz: bam}",
+ "result": null
+ },
+ {
+ "comment": "Trailing comma",
+ "expression": "a.{foo: bar, }",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing key in second key-value pair",
+ "expression": "a.{foo: bar, baz}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Missing value in second key-value pair",
+ "expression": "a.{foo: bar, baz:}",
+ "error": "syntax"
+ },
+ {
+ "comment": "Trailing comma",
+ "expression": "a.{foo: bar, baz: bam, }",
+ "error": "syntax"
+ },
+ {
+ "comment": "Nested multi select",
+ "expression": "{\"\\\\\":{\" \":*}}",
+ "result": {"\\": {" ": ["object"]}}
+ }
+ ]
+ },
+ {
+ "comment": "Or expressions",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "foo || bar",
+ "result": null
+ },
+ {
+ "expression": "foo ||",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo.|| bar",
+ "error": "syntax"
+ },
+ {
+ "expression": " || foo",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo || || foo",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo.[a || b]",
+ "result": null
+ },
+ {
+ "expression": "foo.[a ||]",
+ "error": "syntax"
+ },
+ {
+ "expression": "\"foo",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Filter expressions",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "foo[?bar==`\"baz\"`]",
+ "result": null
+ },
+ {
+ "expression": "foo[? bar == `\"baz\"` ]",
+ "result": null
+ },
+ {
+ "expression": "foo[ ?bar==`\"baz\"`]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[?bar==]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[?==]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[?==bar]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[?bar==baz?]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[?a.b.c==d.e.f]",
+ "result": null
+ },
+ {
+ "expression": "foo[?bar==`[0, 1, 2]`]",
+ "result": null
+ },
+ {
+ "expression": "foo[?bar==`[\"a\", \"b\", \"c\"]`]",
+ "result": null
+ },
+ {
+ "comment": "Literal char not escaped",
+ "expression": "foo[?bar==`[\"foo`bar\"]`]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Literal char escaped",
+ "expression": "foo[?bar==`[\"foo\\`bar\"]`]",
+ "result": null
+ },
+ {
+ "comment": "Unknown comparator",
+ "expression": "foo[?bar<>baz]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Unknown comparator",
+ "expression": "foo[?bar^baz]",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[bar==baz]",
+ "error": "syntax"
+ },
+ {
+ "comment": "Quoted identifier in filter expression no spaces",
+ "expression": "[?\"\\\\\">`\"foo\"`]",
+ "result": null
+ },
+ {
+ "comment": "Quoted identifier in filter expression with spaces",
+ "expression": "[?\"\\\\\" > `\"foo\"`]",
+ "result": null
+ }
+ ]
+ },
+ {
+ "comment": "Filter expression errors",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "bar.`\"anything\"`",
+ "error": "syntax"
+ },
+ {
+ "expression": "bar.baz.noexists.`\"literal\"`",
+ "error": "syntax"
+ },
+ {
+ "comment": "Literal wildcard projection",
+ "expression": "foo[*].`\"literal\"`",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[*].name.`\"literal\"`",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[].name.`\"literal\"`",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[].name.`\"literal\"`.`\"subliteral\"`",
+ "error": "syntax"
+ },
+ {
+ "comment": "Projecting a literal onto an empty list",
+ "expression": "foo[*].name.noexist.`\"literal\"`",
+ "error": "syntax"
+ },
+ {
+ "expression": "foo[].name.noexist.`\"literal\"`",
+ "error": "syntax"
+ },
+ {
+ "expression": "twolen[*].`\"foo\"`",
+ "error": "syntax"
+ },
+ {
+ "comment": "Two level projection of a literal",
+ "expression": "twolen[*].threelen[*].`\"bar\"`",
+ "error": "syntax"
+ },
+ {
+ "comment": "Two level flattened projection of a literal",
+ "expression": "twolen[].threelen[].`\"bar\"`",
+ "error": "syntax"
+ }
+ ]
+ },
+ {
+ "comment": "Identifiers",
+ "given": {"type": "object"},
+ "cases": [
+ {
+ "expression": "foo",
+ "result": null
+ },
+ {
+ "expression": "\"foo\"",
+ "result": null
+ },
+ {
+ "expression": "\"\\\\\"",
+ "result": null
+ }
+ ]
+ },
+ {
+ "comment": "Combined syntax",
+ "given": [],
+ "cases": [
+ {
+ "expression": "*||*|*|*",
+ "result": null
+ },
+ {
+ "expression": "*[]||[*]",
+ "result": []
+ },
+ {
+ "expression": "[*.*]",
+ "result": [null]
+ }
+ ]
+ }
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json
new file mode 100644
index 0000000..6b07b0b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json
@@ -0,0 +1,38 @@
+[
+ {
+ "given": {"foo": [{"✓": "✓"}, {"✓": "✗"}]},
+ "cases": [
+ {
+ "expression": "foo[].\"✓\"",
+ "result": ["✓", "✗"]
+ }
+ ]
+ },
+ {
+ "given": {"☯": true},
+ "cases": [
+ {
+ "expression": "\"☯\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪": true},
+ "cases": [
+ {
+ "expression": "\"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪\"",
+ "result": true
+ }
+ ]
+ },
+ {
+ "given": {"☃": true},
+ "cases": [
+ {
+ "expression": "\"☃\"",
+ "result": true
+ }
+ ]
+ }
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json
new file mode 100644
index 0000000..3bcec30
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json
@@ -0,0 +1,460 @@
+[{
+ "given": {
+ "foo": {
+ "bar": {
+ "baz": "val"
+ },
+ "other": {
+ "baz": "val"
+ },
+ "other2": {
+ "baz": "val"
+ },
+ "other3": {
+ "notbaz": ["a", "b", "c"]
+ },
+ "other4": {
+ "notbaz": ["a", "b", "c"]
+ },
+ "other5": {
+ "other": {
+ "a": 1,
+ "b": 1,
+ "c": 1
+ }
+ }
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.*.baz",
+ "result": ["val", "val", "val"]
+ },
+ {
+ "expression": "foo.bar.*",
+ "result": ["val"]
+ },
+ {
+ "expression": "foo.*.notbaz",
+ "result": [["a", "b", "c"], ["a", "b", "c"]]
+ },
+ {
+ "expression": "foo.*.notbaz[0]",
+ "result": ["a", "a"]
+ },
+ {
+ "expression": "foo.*.notbaz[-1]",
+ "result": ["c", "c"]
+ }
+ ]
+}, {
+ "given": {
+ "foo": {
+ "first-1": {
+ "second-1": "val"
+ },
+ "first-2": {
+ "second-1": "val"
+ },
+ "first-3": {
+ "second-1": "val"
+ }
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.*",
+ "result": [{"second-1": "val"}, {"second-1": "val"},
+ {"second-1": "val"}]
+ },
+ {
+ "expression": "foo.*.*",
+ "result": [["val"], ["val"], ["val"]]
+ },
+ {
+ "expression": "foo.*.*.*",
+ "result": [[], [], []]
+ },
+ {
+ "expression": "foo.*.*.*.*",
+ "result": [[], [], []]
+ }
+ ]
+}, {
+ "given": {
+ "foo": {
+ "bar": "one"
+ },
+ "other": {
+ "bar": "one"
+ },
+ "nomatch": {
+ "notbar": "three"
+ }
+ },
+ "cases": [
+ {
+ "expression": "*.bar",
+ "result": ["one", "one"]
+ }
+ ]
+}, {
+ "given": {
+ "top1": {
+ "sub1": {"foo": "one"}
+ },
+ "top2": {
+ "sub1": {"foo": "one"}
+ }
+ },
+ "cases": [
+ {
+ "expression": "*",
+ "result": [{"sub1": {"foo": "one"}},
+ {"sub1": {"foo": "one"}}]
+ },
+ {
+ "expression": "*.sub1",
+ "result": [{"foo": "one"},
+ {"foo": "one"}]
+ },
+ {
+ "expression": "*.*",
+ "result": [[{"foo": "one"}],
+ [{"foo": "one"}]]
+ },
+ {
+ "expression": "*.*.foo[]",
+ "result": ["one", "one"]
+ },
+ {
+ "expression": "*.sub1.foo",
+ "result": ["one", "one"]
+ }
+ ]
+},
+{
+ "given":
+ {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]},
+ "cases": [
+ {
+ "expression": "foo[*].bar",
+ "result": ["one", "two", "three"]
+ },
+ {
+ "expression": "foo[*].notbar",
+ "result": ["four"]
+ }
+ ]
+},
+{
+ "given":
+ [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}],
+ "cases": [
+ {
+ "expression": "[*]",
+ "result": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]
+ },
+ {
+ "expression": "[*].bar",
+ "result": ["one", "two", "three"]
+ },
+ {
+ "expression": "[*].notbar",
+ "result": ["four"]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": {
+ "bar": [
+ {"baz": ["one", "two", "three"]},
+ {"baz": ["four", "five", "six"]},
+ {"baz": ["seven", "eight", "nine"]}
+ ]
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.bar[*].baz",
+ "result": [["one", "two", "three"], ["four", "five", "six"], ["seven", "eight", "nine"]]
+ },
+ {
+ "expression": "foo.bar[*].baz[0]",
+ "result": ["one", "four", "seven"]
+ },
+ {
+ "expression": "foo.bar[*].baz[1]",
+ "result": ["two", "five", "eight"]
+ },
+ {
+ "expression": "foo.bar[*].baz[2]",
+ "result": ["three", "six", "nine"]
+ },
+ {
+ "expression": "foo.bar[*].baz[3]",
+ "result": []
+ }
+ ]
+},
+{
+ "given": {
+ "foo": {
+ "bar": [["one", "two"], ["three", "four"]]
+ }
+ },
+ "cases": [
+ {
+ "expression": "foo.bar[*]",
+ "result": [["one", "two"], ["three", "four"]]
+ },
+ {
+ "expression": "foo.bar[0]",
+ "result": ["one", "two"]
+ },
+ {
+ "expression": "foo.bar[0][0]",
+ "result": "one"
+ },
+ {
+ "expression": "foo.bar[0][0][0]",
+ "result": null
+ },
+ {
+ "expression": "foo.bar[0][0][0][0]",
+ "result": null
+ },
+ {
+ "expression": "foo[0][0]",
+ "result": null
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [
+ {"bar": [{"kind": "basic"}, {"kind": "intermediate"}]},
+ {"bar": [{"kind": "advanced"}, {"kind": "expert"}]},
+ {"bar": "string"}
+ ]
+
+ },
+ "cases": [
+ {
+ "expression": "foo[*].bar[*].kind",
+ "result": [["basic", "intermediate"], ["advanced", "expert"]]
+ },
+ {
+ "expression": "foo[*].bar[0].kind",
+ "result": ["basic", "advanced"]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [
+ {"bar": {"kind": "basic"}},
+ {"bar": {"kind": "intermediate"}},
+ {"bar": {"kind": "advanced"}},
+ {"bar": {"kind": "expert"}},
+ {"bar": "string"}
+ ]
+ },
+ "cases": [
+ {
+ "expression": "foo[*].bar.kind",
+ "result": ["basic", "intermediate", "advanced", "expert"]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [{"bar": ["one", "two"]}, {"bar": ["three", "four"]}, {"bar": ["five"]}]
+ },
+ "cases": [
+ {
+ "expression": "foo[*].bar[0]",
+ "result": ["one", "three", "five"]
+ },
+ {
+ "expression": "foo[*].bar[1]",
+ "result": ["two", "four"]
+ },
+ {
+ "expression": "foo[*].bar[2]",
+ "result": []
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [{"bar": []}, {"bar": []}, {"bar": []}]
+ },
+ "cases": [
+ {
+ "expression": "foo[*].bar[0]",
+ "result": []
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [["one", "two"], ["three", "four"], ["five"]]
+ },
+ "cases": [
+ {
+ "expression": "foo[*][0]",
+ "result": ["one", "three", "five"]
+ },
+ {
+ "expression": "foo[*][1]",
+ "result": ["two", "four"]
+ }
+ ]
+},
+{
+ "given": {
+ "foo": [
+ [
+ ["one", "two"], ["three", "four"]
+ ], [
+ ["five", "six"], ["seven", "eight"]
+ ], [
+ ["nine"], ["ten"]
+ ]
+ ]
+ },
+ "cases": [
+ {
+ "expression": "foo[*][0]",
+ "result": [["one", "two"], ["five", "six"], ["nine"]]
+ },
+ {
+ "expression": "foo[*][1]",
+ "result": [["three", "four"], ["seven", "eight"], ["ten"]]
+ },
+ {
+ "expression": "foo[*][0][0]",
+ "result": ["one", "five", "nine"]
+ },
+ {
+ "expression": "foo[*][1][0]",
+ "result": ["three", "seven", "ten"]
+ },
+ {
+ "expression": "foo[*][0][1]",
+ "result": ["two", "six"]
+ },
+ {
+ "expression": "foo[*][1][1]",
+ "result": ["four", "eight"]
+ },
+ {
+ "expression": "foo[*][2]",
+ "result": []
+ },
+ {
+ "expression": "foo[*][2][2]",
+ "result": []
+ },
+ {
+ "expression": "bar[*]",
+ "result": null
+ },
+ {
+ "expression": "bar[*].baz[*]",
+ "result": null
+ }
+ ]
+},
+{
+ "given": {
+ "string": "string",
+ "hash": {"foo": "bar", "bar": "baz"},
+ "number": 23,
+ "nullvalue": null
+ },
+ "cases": [
+ {
+ "expression": "string[*]",
+ "result": null
+ },
+ {
+ "expression": "hash[*]",
+ "result": null
+ },
+ {
+ "expression": "number[*]",
+ "result": null
+ },
+ {
+ "expression": "nullvalue[*]",
+ "result": null
+ },
+ {
+ "expression": "string[*].foo",
+ "result": null
+ },
+ {
+ "expression": "hash[*].foo",
+ "result": null
+ },
+ {
+ "expression": "number[*].foo",
+ "result": null
+ },
+ {
+ "expression": "nullvalue[*].foo",
+ "result": null
+ },
+ {
+ "expression": "nullvalue[*].foo[*].bar",
+ "result": null
+ }
+ ]
+},
+{
+ "given": {
+ "string": "string",
+ "hash": {"foo": "val", "bar": "val"},
+ "number": 23,
+ "array": [1, 2, 3],
+ "nullvalue": null
+ },
+ "cases": [
+ {
+ "expression": "string.*",
+ "result": null
+ },
+ {
+ "expression": "hash.*",
+ "result": ["val", "val"]
+ },
+ {
+ "expression": "number.*",
+ "result": null
+ },
+ {
+ "expression": "array.*",
+ "result": null
+ },
+ {
+ "expression": "nullvalue.*",
+ "result": null
+ }
+ ]
+},
+{
+ "given": {
+ "a": [0, 1, 2],
+ "b": [0, 1, 2]
+ },
+ "cases": [
+ {
+ "expression": "*[0]",
+ "result": [0, 0]
+ }
+ ]
+}
+]
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 0000000..9b7cd89
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,856 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+ jpUnknown jpType = "unknown"
+ jpNumber jpType = "number"
+ jpString jpType = "string"
+ jpArray jpType = "array"
+ jpObject jpType = "object"
+ jpArrayNumber jpType = "array[number]"
+ jpArrayString jpType = "array[string]"
+ jpExpref jpType = "expref"
+ jpAny jpType = "any"
+)
+
+type functionEntry struct {
+ name string
+ arguments []argSpec
+ handler jpFunction
+ hasExpRef bool
+}
+
+type argSpec struct {
+ types []jpType
+ variadic bool
+}
+
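+// byExprString implements sort.Interface over items, ordering them by the
+// string produced when the JMESPath expression node is evaluated against each
+// item; evaluation errors or non-string results set hasError for the caller.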
+type byExprString struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprString) Len() int {
+ return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
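+// byExprFloat is the float64 counterpart of byExprString, ordering items by
+// the numeric result of evaluating the expression against each item.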
+type byExprFloat struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+ return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
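+// functionCaller dispatches built-in JMESPath functions by name via a lookup table.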
+type functionCaller struct {
+ functionTable map[string]functionEntry
+}
+
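+// newFunctionCaller registers each built-in function with its argument spec and handler.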
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
+
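+// resolveArgs validates the supplied arguments against the entry's argSpec list.
+// Fixed-arity functions get a full count and per-argument type check; variadic
+// functions are only checked for minimum arity here.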
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+ if len(e.arguments) == 0 {
+ return arguments, nil
+ }
+ if !e.arguments[len(e.arguments)-1].variadic {
+ if len(e.arguments) != len(arguments) {
+ return nil, errors.New("incorrect number of args")
+ }
+ for i, spec := range e.arguments {
+ userArg := arguments[i]
+ err := spec.typeCheck(userArg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arguments, nil
+ }
+ if len(arguments) < len(e.arguments) {
+ return nil, errors.New("Invalid arity.")
+ }
+ return arguments, nil
+}
+
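+// typeCheck returns nil when arg matches at least one of the accepted jpTypes.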
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
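+// CallFunction looks up a built-in by name, validates its arguments, and invokes
+// the handler. For functions that take expression references, the interpreter is
+// prepended to the argument list so the handler can evaluate the expression.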
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
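+
+// Illustrative sketch only (not part of the upstream API): the table built
+// in newFunctionCaller drives dispatch, so evaluating a call such as
+// abs(`-5`) reduces to roughly:
+//
+//	caller := newFunctionCaller()
+//	result, err := caller.CallFunction("abs", []interface{}{-5.0}, newInterpreter())
+//	// result == 5.0, err == nil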
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+ num := arguments[0].(float64)
+ return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ prefix := arguments[1].(string)
+ return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+ // We've already type checked the value so we can safely use
+ // type assertions.
+ args := arguments[0].([]interface{})
+ length := float64(len(args))
+ numerator := 0.0
+ for _, n := range args {
+ numerator += n.(float64)
+ }
+ return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+ search := arguments[0]
+ el := arguments[1]
+ if searchStr, ok := search.(string); ok {
+ if elStr, ok := el.(string); ok {
+ return strings.Contains(searchStr, elStr), nil
+ }
+ return false, nil
+ }
+ // Otherwise this is a generic contains for []interface{}
+ general := search.([]interface{})
+ for _, item := range general {
+ if item == el {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ suffix := arguments[1].(string)
+ return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+ final := make(map[string]interface{})
+ for _, m := range arguments {
+ mapped := m.(map[string]interface{})
+ for key, value := range mapped {
+ final[key] = value
+ }
+ }
+ return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ switch t := start.(type) {
+ case float64:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ case string:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ default:
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if _, ok := arg.(float64); ok {
+ return "number", nil
+ }
+ if _, ok := arg.(string); ok {
+ return "string", nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return "array", nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return "object", nil
+ }
+ if arg == nil {
+ return "null", nil
+ }
+ if arg == true || arg == false {
+ return "boolean", nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for key := range arg {
+ collected = append(collected, key)
+ }
+ return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for _, value := range arg {
+ collected = append(collected, value)
+ }
+ return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with sort()'ing strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return arr, nil
+ } else if len(arr) == 1 {
+ return arr, nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := start.(float64); ok {
+ sortable := &byExprFloat{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else if _, ok := start.(string); ok {
+ sortable := &byExprString{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+ sep := arguments[0].(string)
+ // We can't just do arguments[1].([]string), we have to
+ // manually convert each item to a string.
+ arrayStr := []string{}
+ for _, item := range arguments[1].([]interface{}) {
+ arrayStr = append(arrayStr, item.(string))
+ }
+ return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+ if s, ok := arguments[0].(string); ok {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r), nil
+ }
+ items := arguments[0].([]interface{})
+ length := len(items)
+ reversed := make([]interface{}, length)
+ for i, item := range items {
+ reversed[length-(i+1)] = item
+ }
+ return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+ if _, ok := arguments[0].([]interface{}); ok {
+ return arguments[0], nil
+ }
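+ // Not a slice: wrap the single argument in a one-element result. The
+ // full slice expression [:1:1] also caps capacity at 1, so appending to
+ // the result cannot write into the shared arguments backing array.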
+ return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+ if v, ok := arguments[0].(string); ok {
+ return v, nil
+ }
+ result, err := json.Marshal(arguments[0])
+ if err != nil {
+ return nil, err
+ }
+ return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if v, ok := arg.(float64); ok {
+ return v, nil
+ }
+ if v, ok := arg.(string); ok {
+ conv, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return nil, nil
+ }
+ return conv, nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return nil, nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil, nil
+ }
+ if arg == nil {
+ return nil, nil
+ }
+ if arg == true || arg == false {
+ return nil, nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+ for _, arg := range arguments {
+ if arg != nil {
+ return arg, nil
+ }
+ }
+ return nil, nil
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go
new file mode 100644
index 0000000..c7df087
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go
@@ -0,0 +1,13 @@
+package jmespath
+
+import "github.com/jmespath/go-jmespath"
+
+// Fuzz will fuzz test the JMESPath parser.
+func Fuzz(data []byte) int {
+ p := jmespath.NewParser()
+ _, err := p.Parse(string(data))
+ if err != nil {
+ return 1
+ }
+ return 0
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 0000000..13c7460
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree-based interpreter. It walks the AST and directly
+ interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
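+
+// A minimal usage sketch (assumes the Parser defined in parser.go of this
+// package): compile an expression once, then Execute it against a document.
+//
+//	parser := NewParser()
+//	ast, err := parser.Parse("foo.bar")
+//	if err != nil {
+//		// handle the SyntaxError
+//	}
+//	data := map[string]interface{}{
+//		"foo": map[string]interface{}{"bar": "baz"},
+//	}
+//	result, _ := newInterpreter().Execute(ast, data) // result == "baz"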
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ values := make([]interface{}, 0, len(mapType)) // length 0, capacity len: avoids leading nil padding before the appends below
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(value)
+ first, n := utf8.DecodeRuneInString(key)
+ fieldName := string(unicode.ToUpper(first)) + key[n:]
+ if rv.Kind() == reflect.Struct {
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ } else if rv.Kind() == reflect.Ptr {
+ // Handle multiple levels of indirection?
+ if rv.IsNil() {
+ return nil, nil
+ }
+ rv = rv.Elem()
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ }
+ return nil, nil
+}
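+
+// For example, the field expression "name" resolves against a struct by
+// upper-casing the first rune and looking up an exported field "Name";
+// unexported struct fields therefore remain unreachable.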
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ final := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ final = append(final, element)
+ }
+ return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 0000000..817900c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ tokenType tokType
+ value string
+ position int
+ length int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+ expression string // The expression provided by the user.
+ currentPos int // The current position in the string.
+ lastWidth int // The width of the current rune.
+ buf bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+ msg string // Error message displayed to user
+ Expression string // Expression that generated a SyntaxError
+ Offset int // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+ // In the future, it would be good to underline the specific
+ // location where the error occurred.
+ return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+ return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
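+
+// For example, a SyntaxError with Offset 4 for the expression "foo.!bar"
+// highlights as:
+//
+//	foo.!bar
+//	    ^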
+
+//go:generate stringer -type=tokType
+const (
+ tUnknown tokType = iota
+ tStar
+ tDot
+ tFilter
+ tFlatten
+ tLparen
+ tRparen
+ tLbracket
+ tRbracket
+ tLbrace
+ tRbrace
+ tOr
+ tPipe
+ tNumber
+ tUnquotedIdentifier
+ tQuotedIdentifier
+ tComma
+ tColon
+ tLT
+ tLTE
+ tGT
+ tGTE
+ tEQ
+ tNE
+ tJSONLiteral
+ tStringLiteral
+ tCurrent
+ tExpref
+ tAnd
+ tNot
+ tEOF
+)
+
+var basicTokens = map[rune]tokType{
+ '.': tDot,
+ '*': tStar,
+ ',': tComma,
+ ':': tColon,
+ '{': tLbrace,
+ '}': tRbrace,
+ ']': tRbracket, // tLbracket not included because it could be "[]"
+ '(': tLparen,
+ ')': tRparen,
+ '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+ ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+ return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+ t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+ lexer := Lexer{}
+ return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+ if lexer.currentPos >= len(lexer.expression) {
+ lexer.lastWidth = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+ lexer.lastWidth = w
+ lexer.currentPos += w
+ return r
+}
+
+func (lexer *Lexer) back() {
+ lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+ t := lexer.next()
+ lexer.back()
+ return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+ var tokens []token
+ lexer.expression = expression
+ lexer.currentPos = 0
+ lexer.lastWidth = 0
+loop:
+ for {
+ r := lexer.next()
+ if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+ t := lexer.consumeUnquotedIdentifier()
+ tokens = append(tokens, t)
+ } else if val, ok := basicTokens[r]; ok {
+ // Basic single char token.
+ t := token{
+ tokenType: val,
+ value: string(r),
+ position: lexer.currentPos - lexer.lastWidth,
+ length: 1,
+ }
+ tokens = append(tokens, t)
+ } else if r == '-' || (r >= '0' && r <= '9') {
+ t := lexer.consumeNumber()
+ tokens = append(tokens, t)
+ } else if r == '[' {
+ t := lexer.consumeLBracket()
+ tokens = append(tokens, t)
+ } else if r == '"' {
+ t, err := lexer.consumeQuotedIdentifier()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '\'' {
+ t, err := lexer.consumeRawStringLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '`' {
+ t, err := lexer.consumeLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '|' {
+ t := lexer.matchOrElse(r, '|', tOr, tPipe)
+ tokens = append(tokens, t)
+ } else if r == '<' {
+ t := lexer.matchOrElse(r, '=', tLTE, tLT)
+ tokens = append(tokens, t)
+ } else if r == '>' {
+ t := lexer.matchOrElse(r, '=', tGTE, tGT)
+ tokens = append(tokens, t)
+ } else if r == '!' {
+ t := lexer.matchOrElse(r, '=', tNE, tNot)
+ tokens = append(tokens, t)
+ } else if r == '=' {
+ t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+ tokens = append(tokens, t)
+ } else if r == '&' {
+ t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+ tokens = append(tokens, t)
+ } else if r == eof {
+ break loop
+ } else if _, ok := whiteSpace[r]; ok {
+ // Ignore whitespace
+ } else {
+ return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+ }
+ }
+ tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+ return tokens, nil
+}
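+
+// As an informal illustration, tokenize("foo[0]") produces
+// tUnquotedIdentifier("foo"), tLbracket, tNumber("0"), tRbracket,
+// followed by the trailing tEOF token.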
+
+// Consume characters until the ending rune "r" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "r", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+ start := lexer.currentPos
+ current := lexer.next()
+ for current != end && current != eof {
+ if current == '\\' && lexer.peek() != eof {
+ lexer.next()
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return "", SyntaxError{
+ msg: "Unclosed delimiter: " + string(end),
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('`')
+ if err != nil {
+ return token{}, err
+ }
+ value = strings.Replace(value, "\\`", "`", -1)
+ return token{
+ tokenType: tJSONLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+ start := lexer.currentPos
+ currentIndex := start
+ current := lexer.next()
+ for current != '\'' && lexer.peek() != eof {
+ if current == '\\' && lexer.peek() == '\'' {
+ chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+ lexer.buf.WriteString(chunk)
+ lexer.buf.WriteString("'")
+ lexer.next()
+ currentIndex = lexer.currentPos
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return token{}, SyntaxError{
+ msg: "Unclosed delimiter: '",
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ if currentIndex < lexer.currentPos {
+ lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+ }
+ value := lexer.buf.String()
+ // Reset the buffer so it can be reused.
+ lexer.buf.Reset()
+ return token{
+ tokenType: tStringLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: lexer.expression,
+ Offset: lexer.currentPos - 1,
+ }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tPipe, "|" -> tOr.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
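+
+// For example, after reading '<', matchOrElse('<', '=', tLTE, tLT) emits
+// tLTE when the next rune is '=' and tLT otherwise.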
+
+func (lexer *Lexer) consumeLBracket() token {
+ // There are three options here:
+ // 1. A filter expression "[?"
+ // 2. A flatten operator "[]"
+ // 3. A bare lbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+ if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 0000000..1240a17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+ ASTEmpty astNodeType = iota
+ ASTComparator
+ ASTCurrentNode
+ ASTExpRef
+ ASTFunctionExpression
+ ASTField
+ ASTFilterProjection
+ ASTFlatten
+ ASTIdentity
+ ASTIndex
+ ASTIndexExpression
+ ASTKeyValPair
+ ASTLiteral
+ ASTMultiSelectHash
+ ASTMultiSelectList
+ ASTOrExpression
+ ASTAndExpression
+ ASTNotExpression
+ ASTPipe
+ ASTProjection
+ ASTSubexpression
+ ASTSlice
+ ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+ nodeType astNodeType
+ value interface{}
+ children []ASTNode
+}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+ lastIndex := len(node.children)
+ if lastIndex > 0 {
+ output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+ childIndent := nextIndent + 2
+ for _, elem := range node.children {
+ output += elem.PrettyPrint(childIndent)
+ }
+ output += fmt.Sprintf("%s}\n", strings.Repeat(" ", nextIndent))
+ }
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
+
+var bindingPowers = map[tokType]int{
+ tEOF: 0,
+ tUnquotedIdentifier: 0,
+ tQuotedIdentifier: 0,
+ tRbracket: 0,
+ tRparen: 0,
+ tComma: 0,
+ tRbrace: 0,
+ tNumber: 0,
+ tCurrent: 0,
+ tExpref: 0,
+ tColon: 0,
+ tPipe: 1,
+ tOr: 2,
+ tAnd: 3,
+ tEQ: 5,
+ tLT: 5,
+ tLTE: 5,
+ tGT: 5,
+ tGTE: 5,
+ tNE: 5,
+ tFlatten: 9,
+ tStar: 20,
+ tFilter: 21,
+ tDot: 40,
+ tNot: 45,
+ tLbrace: 50,
+ tLbracket: 55,
+ tLparen: 60,
+}
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+ expression string
+ tokens []token
+ index int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+ p := Parser{}
+ return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+ return ASTNode{}, p.syntaxError(fmt.Sprintf(
+ "Unexpected token at the end of the expresssion: %s", p.current()))
+ }
+ return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+ var err error
+ leftToken := p.lookaheadToken(0)
+ p.advance()
+ leftNode, err := p.nud(leftToken)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken := p.current()
+ for bindingPower < bindingPowers[currentToken] {
+ p.advance()
+ leftNode, err = p.led(currentToken, leftNode)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken = p.current()
+ }
+ return leftNode, nil
+}
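+
+// Sketch of the precedence climbing above: with the bindingPowers table,
+// "a || b | c" parses as (a || b) | c, because tPipe (1) binds more
+// loosely than tOr (2), so the pipe terminates the or's right-hand side.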
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+ if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+ return p.parseSliceExpression()
+ }
+ indexStr := p.lookaheadToken(0).value
+ parsedInt, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+ p.advance()
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+ parts := []*int{nil, nil, nil}
+ index := 0
+ current := p.current()
+ for current != tRbracket && index < 3 {
+ if current == tColon {
+ index++
+ p.advance()
+ } else if current == tNumber {
+ parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ parts[index] = &parsedInt
+ p.advance()
+ } else {
+ return ASTNode{}, p.syntaxError(
+ "Expected tColon or tNumber" + ", received: " + p.current().String())
+ }
+ current = p.current()
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTSlice,
+ value: parts,
+ }, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+ switch tokenType {
+ case tDot:
+ if p.current() != tStar {
+ right, err := p.parseDotRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTSubexpression,
+ children: []ASTNode{node, right},
+ }, err
+ }
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTValueProjection,
+ children: []ASTNode{node, right},
+ }, err
+ case tPipe:
+ right, err := p.parseExpression(bindingPowers[tPipe])
+ return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+ case tOr:
+ right, err := p.parseExpression(bindingPowers[tOr])
+ return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+ case tAnd:
+ right, err := p.parseExpression(bindingPowers[tAnd])
+ return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+ case tLparen:
+ name := node.value
+ var args []ASTNode
+ for p.current() != tRparen {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tComma {
+ if err := p.match(tComma); err != nil {
+ return ASTNode{}, err
+ }
+ }
+ args = append(args, expression)
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTFunctionExpression,
+ value: name,
+ children: args,
+ }, nil
+ case tFilter:
+ return p.parseFilter(node)
+ case tFlatten:
+ left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{left, right},
+ }, err
+ case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+ right, err := p.parseExpression(bindingPowers[tokenType])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTComparator,
+ value: tokenType,
+ children: []ASTNode{node, right},
+ }, nil
+ case tLbracket:
+ tokenType := p.current()
+ var right ASTNode
+ var err error
+ if tokenType == tNumber || tokenType == tColon {
+ right, err = p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(node, right)
+ }
+ // Otherwise this is a projection.
+ if err := p.match(tStar); err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{node, right},
+ }, nil
+ }
+ return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+ if tokenType == tNumber || tokenType == tColon {
+ right, err := p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+ var expressions []ASTNode
+ for {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ expressions = append(expressions, expression)
+ if p.current() == tRbracket {
+ break
+ }
+ err = p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+ err := p.match(tRbracket)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectList,
+ children: expressions,
+ }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+ err := p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ } else if p.current() == tRbrace {
+ err := p.match(tRbrace)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+ indexExpr := ASTNode{
+ nodeType: ASTIndexExpression,
+ children: []ASTNode{left, right},
+ }
+ if right.nodeType == ASTSlice {
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{indexExpr, right},
+ }, err
+ }
+ return indexExpr, nil
+}
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+ var right, condition ASTNode
+ var err error
+ condition, err = p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tFlatten {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+
+ return ASTNode{
+ nodeType: ASTFilterProjection,
+ children: []ASTNode{node, right, condition},
+ }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+ return ASTNode{}, p.syntaxError("Error")
+ }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+ return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+ return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+ return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+ p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+ for _, elem := range elements {
+ if elem == token {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: p.lookaheadToken(0).position,
+ }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: t.position,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 0000000..dae79cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+ if i < 0 || i >= tokType(len(_tokType_index)-1) {
+ return fmt.Sprintf("tokType(%d)", i)
+ }
+ return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 0000000..ddc1b7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
+// IsFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+ switch v := value.(type) {
+ case bool:
+ return !v
+ case []interface{}:
+ return len(v) == 0
+ case map[string]interface{}:
+ return len(v) == 0
+ case string:
+ return len(v) == 0
+ case nil:
+ return true
+ }
+ // Try the reflection cases before returning false.
+ rv := reflect.ValueOf(value)
+ switch rv.Kind() {
+ case reflect.Struct:
+ // A struct type will never be false, even if
+ // all of its values are the zero type.
+ return false
+ case reflect.Slice, reflect.Map:
+ return rv.Len() == 0
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return true
+ }
+ // If it's a pointer type, we'll try to deref the pointer
+ // and evaluate the pointer value for isFalse.
+ element := rv.Elem()
+ return isFalse(element.Interface())
+ }
+ return false
+}
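+
+// Illustration: isFalse("") and isFalse([]interface{}{}) are true, while
+// isFalse(0.0) is false, since numbers are never false under the JMESPath
+// spec.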
+
+// ObjsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+ return reflect.DeepEqual(left, right)
+}
+
+// SliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// python slices.
+type sliceParam struct {
+ N int
+ Specified bool
+}
+
+// Slice supports [start:stop:step] style slicing that's supported in JMESPath.
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+ computed, err := computeSliceParams(len(slice), parts)
+ if err != nil {
+ return nil, err
+ }
+ start, stop, step := computed[0], computed[1], computed[2]
+ result := []interface{}{}
+ if step > 0 {
+ for i := start; i < stop; i += step {
+ result = append(result, slice[i])
+ }
+ } else {
+ for i := start; i > stop; i += step {
+ result = append(result, slice[i])
+ }
+ }
+ return result, nil
+}
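+
+// Sketch: for data := []interface{}{0.0, 1.0, 2.0, 3.0}, the JMESPath
+// slice [::2] arrives here as parts with only the step specified
+// ({Specified: false}, {Specified: false}, {N: 2, Specified: true}),
+// and slice(data, parts) returns [0.0, 2.0].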
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+ var start, stop, step int
+ if !parts[2].Specified {
+ step = 1
+ } else if parts[2].N == 0 {
+ return nil, errors.New("Invalid slice, step cannot be 0")
+ } else {
+ step = parts[2].N
+ }
+ stepValueNegative := step < 0
+
+ if !parts[0].Specified {
+ if stepValueNegative {
+ start = length - 1
+ } else {
+ start = 0
+ }
+ } else {
+ start = capSlice(length, parts[0].N, step)
+ }
+
+ if !parts[1].Specified {
+ if stepValueNegative {
+ stop = -1
+ } else {
+ stop = length
+ }
+ } else {
+ stop = capSlice(length, parts[1].N, step)
+ }
+ return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+ if actual < 0 {
+ actual += length
+ if actual < 0 {
+ if step < 0 {
+ actual = -1
+ } else {
+ actual = 0
+ }
+ }
+ } else if actual >= length {
+ if step < 0 {
+ actual = length - 1
+ } else {
+ actual = length
+ }
+ }
+ return actual
+}
+
+// ToArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]float64, len(d))
+ for i, el := range d {
+ item, ok := el.(float64)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+// ToArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]string, len(d))
+ for i, el := range d {
+ item, ok := el.(string)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+ if v == nil {
+ return false
+ }
+ return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore b/Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml b/Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml
new file mode 100644
index 0000000..c62e25f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+ - 1.5
+ - tip
+
+script:
+ - go test -v .
+ - go test -v -race .
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE b/Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE
new file mode 100644
index 0000000..4fd5963
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2015 Klaus Post
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/README.md b/Godeps/_workspace/src/github.com/klauspost/crc32/README.md
new file mode 100644
index 0000000..440541c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/README.md
@@ -0,0 +1,84 @@
+# crc32
+CRC32 hash with x64 optimizations
+
+This package is a drop-in replacement for the standard library `hash/crc32` package that features SSE 4.2 optimizations on x64 platforms, giving roughly a 10x speedup.
+
+[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32)
+
+# usage
+
+Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.
+
+Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.
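+
+A minimal sketch of typical use, exercising only names this package exports (`ChecksumIEEE`, `MakeTable`, `New`, `Castagnoli`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/crc32"
+)
+
+func main() {
+	data := []byte("hello world")
+
+	// One-shot checksum with the IEEE polynomial.
+	fmt.Printf("%08x\n", crc32.ChecksumIEEE(data))
+
+	// Streaming use via hash.Hash32, here with the Castagnoli polynomial.
+	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+	h.Write(data)
+	fmt.Printf("%08x\n", h.Sum32())
+}
+```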
+
+# changes
+
+* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.
+
+
+# performance
+
+For IEEE tables (the most common), there is approximately a factor 10 speedup with the "CLMUL" (carry-less multiplication) instruction:
+```
+benchmark old ns/op new ns/op delta
+BenchmarkCrc32KB 99955 10258 -89.74%
+
+benchmark old MB/s new MB/s speedup
+BenchmarkCrc32KB 327.83 3194.20 9.74x
+```
+
+For other tables, even on "CLMUL"-capable machines, the performance is the same as the standard library.
+
+Here are some detailed benchmarks, comparing to the Go 1.5 standard library with and without assembler enabled.
+
+```
+Std: Standard Go 1.5 library
+Crc: Indicates IEEE type CRC.
+40B: Size of each slice encoded.
+NoAsm: Assembler was disabled (i.e. not an AMD64 or SSE 4.2+ capable machine).
+Castagnoli: Castagnoli CRC type.
+
+BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s
+BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8)
+BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8)
+
+BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s
+BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8)
+BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm)
+
+BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8)
+BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8)
+BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm)
+
+BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8)
+BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8)
+BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s
+BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm)
+BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8)
+BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s
+BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm)
+BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8)
+BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s
+BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm)
+BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8)
+BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s
+BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm)
+BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8)
+BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm)
+```
+
+The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.
+
+The improved use of slice-by-8, however, has not; it will probably be submitted for Go 1.7.
+
+# license
+
+Standard Go license. Changes are Copyright (c) 2015 Klaus Post under the same conditions.
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go
new file mode 100644
index 0000000..b584e41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go
@@ -0,0 +1,182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
+// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
+// information.
+//
+// Polynomials are represented in LSB-first form, also known as reversed representation.
+//
+// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
+// for information.
+package crc32
+
+import (
+ "hash"
+ "sync"
+)
+
+// The size of a CRC-32 checksum in bytes.
+const Size = 4
+
+// Predefined polynomials.
+const (
+ // IEEE is by far and away the most common CRC-32 polynomial.
+ // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
+ IEEE = 0xedb88320
+
+ // Castagnoli's polynomial, used in iSCSI.
+ // Has better error detection characteristics than IEEE.
+ // http://dx.doi.org/10.1109/26.231911
+ Castagnoli = 0x82f63b78
+
+ // Koopman's polynomial.
+ // Also has better error detection characteristics than IEEE.
+ // http://dx.doi.org/10.1109/DSN.2002.1028931
+ Koopman = 0xeb31d82e
+)
+
+// Table is a 256-word table representing the polynomial for efficient processing.
+type Table [256]uint32
+
+// castagnoliTable points to a lazily initialized Table for the Castagnoli
+// polynomial. MakeTable will always return this value when asked to make a
+// Castagnoli table so we can compare against it to find when the caller is
+// using this polynomial.
+var castagnoliTable *Table
+var castagnoliTable8 *slicing8Table
+var castagnoliOnce sync.Once
+
+func castagnoliInit() {
+ castagnoliTable = makeTable(Castagnoli)
+ castagnoliTable8 = makeTable8(Castagnoli)
+}
+
+// IEEETable is the table for the IEEE polynomial.
+var IEEETable = makeTable(IEEE)
+
+// slicing8Table is an array of 8 Tables.
+type slicing8Table [8]Table
+
+// iEEETable8 is the slicing8Table for IEEE
+var iEEETable8 *slicing8Table
+var iEEETable8Once sync.Once
+
+// MakeTable returns the Table constructed from the specified polynomial.
+func MakeTable(poly uint32) *Table {
+ switch poly {
+ case IEEE:
+ return IEEETable
+ case Castagnoli:
+ castagnoliOnce.Do(castagnoliInit)
+ return castagnoliTable
+ }
+ return makeTable(poly)
+}
+
+// makeTable returns the Table constructed from the specified polynomial.
+func makeTable(poly uint32) *Table {
+ t := new(Table)
+ for i := 0; i < 256; i++ {
+ crc := uint32(i)
+ for j := 0; j < 8; j++ {
+ if crc&1 == 1 {
+ crc = (crc >> 1) ^ poly
+ } else {
+ crc >>= 1
+ }
+ }
+ t[i] = crc
+ }
+ return t
+}
+
+// makeTable8 returns slicing8Table constructed from the specified polynomial.
+func makeTable8(poly uint32) *slicing8Table {
+ t := new(slicing8Table)
+ t[0] = *makeTable(poly)
+ for i := 0; i < 256; i++ {
+ crc := t[0][i]
+ for j := 1; j < 8; j++ {
+ crc = t[0][crc&0xFF] ^ (crc >> 8)
+ t[j][i] = crc
+ }
+ }
+ return t
+}
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ crc uint32
+ tab *Table
+}
+
+// New creates a new hash.Hash32 computing the CRC-32 checksum
+// using the polynomial represented by the Table.
+func New(tab *Table) hash.Hash32 { return &digest{0, tab} }
+
+// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
+// using the IEEE polynomial.
+func NewIEEE() hash.Hash32 { return New(IEEETable) }
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return 1 }
+
+func (d *digest) Reset() { d.crc = 0 }
+
+func update(crc uint32, tab *Table, p []byte) uint32 {
+ crc = ^crc
+ for _, v := range p {
+ crc = tab[byte(crc)^v] ^ (crc >> 8)
+ }
+ return ^crc
+}
+
+// updateSlicingBy8 updates the CRC using the slicing-by-8 algorithm.
+func updateSlicingBy8(crc uint32, tab *slicing8Table, p []byte) uint32 {
+ crc = ^crc
+ for len(p) > 8 {
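+		// Consume 8 bytes per iteration: XOR the first 4 input bytes into
+		// the CRC, then replace the CRC with the XOR of eight table
+		// lookups, one per byte of p[4:8] and one per byte of the updated CRC.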
+ crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+ crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
+ tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
+ tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
+ p = p[8:]
+ }
+ crc = ^crc
+ if len(p) == 0 {
+ return crc
+ }
+ return update(crc, &tab[0], p)
+}
+
+// Update returns the result of adding the bytes in p to the crc.
+func Update(crc uint32, tab *Table, p []byte) uint32 {
+ if tab == castagnoliTable {
+ return updateCastagnoli(crc, p)
+ } else if tab == IEEETable {
+ return updateIEEE(crc, p)
+ }
+ return update(crc, tab, p)
+}
+
+func (d *digest) Write(p []byte) (n int, err error) {
+ d.crc = Update(d.crc, d.tab, p)
+ return len(p), nil
+}
+
+func (d *digest) Sum32() uint32 { return d.crc }
+
+func (d *digest) Sum(in []byte) []byte {
+ s := d.Sum32()
+ return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
+
+// Checksum returns the CRC-32 checksum of data
+// using the polynomial represented by the Table.
+func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }
+
+// ChecksumIEEE returns the CRC-32 checksum of data
+// using the IEEE polynomial.
+func ChecksumIEEE(data []byte) uint32 { return updateIEEE(0, data) }
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go
new file mode 100644
index 0000000..2766f96
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go
@@ -0,0 +1,62 @@
+//+build !appengine
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crc32
+
+// This file contains the code to call the SSE 4.2 version of the Castagnoli
+// and IEEE CRC.
+
+// haveSSE41/haveSSE42/haveCLMUL are defined in crc32_amd64.s and use
+// CPUID to test for SSE 4.1, SSE 4.2 and CLMUL support.
+func haveSSE41() bool
+func haveSSE42() bool
+func haveCLMUL() bool
+
+// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
+// instruction.
+//go:noescape
+func castagnoliSSE42(crc uint32, p []byte) uint32
+
+// ieeeCLMUL is defined in crc32_amd64.s and uses the PCLMULQDQ
+// instruction as well as SSE 4.1.
+//go:noescape
+func ieeeCLMUL(crc uint32, p []byte) uint32
+
+var sse42 = haveSSE42()
+var useFastIEEE = haveCLMUL() && haveSSE41()
+
+func updateCastagnoli(crc uint32, p []byte) uint32 {
+ if sse42 {
+ return castagnoliSSE42(crc, p)
+ }
+	// only use slicing-by-8 when input is >= 16 bytes
+ if len(p) >= 16 {
+ return updateSlicingBy8(crc, castagnoliTable8, p)
+ }
+ return update(crc, castagnoliTable, p)
+}
+
+func updateIEEE(crc uint32, p []byte) uint32 {
+ if useFastIEEE && len(p) >= 64 {
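+		// ieeeCLMUL processes a multiple of 16 bytes (at least 64), so split
+		// off the tail here and finish it with the table-driven update below.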
+ left := len(p) & 15
+ do := len(p) - left
+ crc := ^ieeeCLMUL(^crc, p[:do])
+ if left > 0 {
+ crc = update(crc, IEEETable, p[do:])
+ }
+ return crc
+ }
+
+	// only use slicing-by-8 when input is >= 16 bytes
+ if len(p) >= 16 {
+ iEEETable8Once.Do(func() {
+ iEEETable8 = makeTable8(IEEE)
+ })
+ return updateSlicingBy8(crc, iEEETable8, p)
+ }
+
+ return update(crc, IEEETable, p)
+}
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s
new file mode 100644
index 0000000..e1426bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s
@@ -0,0 +1,235 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define NOSPLIT 4
+#define RODATA 8
+
+// func castagnoliSSE42(crc uint32, p []byte) uint32
+TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
+ MOVL crc+0(FP), AX // CRC value
+ MOVQ p+8(FP), SI // data pointer
+ MOVQ p_len+16(FP), CX // len(p)
+
+ NOTL AX
+
+ // If there's less than 8 bytes to process, we do it byte-by-byte.
+ CMPQ CX, $8
+ JL cleanup
+
+ // Process individual bytes until the input is 8-byte aligned.
+startup:
+ MOVQ SI, BX
+ ANDQ $7, BX
+ JZ aligned
+
+ CRC32B (SI), AX
+ DECQ CX
+ INCQ SI
+ JMP startup
+
+aligned:
+ // The input is now 8-byte aligned and we can process 8-byte chunks.
+ CMPQ CX, $8
+ JL cleanup
+
+ CRC32Q (SI), AX
+ ADDQ $8, SI
+ SUBQ $8, CX
+ JMP aligned
+
+cleanup:
+ // We may have some bytes left over that we process one at a time.
+ CMPQ CX, $0
+ JE done
+
+ CRC32B (SI), AX
+ INCQ SI
+ DECQ CX
+ JMP cleanup
+
+done:
+ NOTL AX
+ MOVL AX, ret+32(FP)
+ RET
+
+// func haveSSE42() bool
+TEXT ·haveSSE42(SB), NOSPLIT, $0
+ XORQ AX, AX
+ INCL AX
+ CPUID
+ SHRQ $20, CX
+ ANDQ $1, CX
+ MOVB CX, ret+0(FP)
+ RET
+
+// func haveCLMUL() bool
+TEXT ·haveCLMUL(SB), NOSPLIT, $0
+ XORQ AX, AX
+ INCL AX
+ CPUID
+ SHRQ $1, CX
+ ANDQ $1, CX
+ MOVB CX, ret+0(FP)
+ RET
+
+// func haveSSE41() bool
+TEXT ·haveSSE41(SB), NOSPLIT, $0
+ XORQ AX, AX
+ INCL AX
+ CPUID
+ SHRQ $19, CX
+ ANDQ $1, CX
+ MOVB CX, ret+0(FP)
+ RET
+
+// CRC32 polynomial data
+//
+// These constants are lifted from the
+// Linux kernel, since they avoid the costly
+// PSHUFB 16 byte reversal proposed in the
+// original Intel paper.
+DATA r2r1kp<>+0(SB)/8, $0x154442bd4
+DATA r2r1kp<>+8(SB)/8, $0x1c6e41596
+DATA r4r3kp<>+0(SB)/8, $0x1751997d0
+DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e
+DATA rupolykp<>+0(SB)/8, $0x1db710641
+DATA rupolykp<>+8(SB)/8, $0x1f7011641
+DATA r5kp<>+0(SB)/8, $0x163cd6124
+
+GLOBL r2r1kp<>(SB), RODATA, $16
+GLOBL r4r3kp<>(SB), RODATA, $16
+GLOBL rupolykp<>(SB), RODATA, $16
+GLOBL r5kp<>(SB), RODATA, $8
+
+// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+// len(p) must be at least 64, and must be a multiple of 16.
+
+// func ieeeCLMUL(crc uint32, p []byte) uint32
+TEXT ·ieeeCLMUL(SB), NOSPLIT, $0
+ MOVL crc+0(FP), X0 // Initial CRC value
+ MOVQ p+8(FP), SI // data pointer
+ MOVQ p_len+16(FP), CX // len(p)
+
+ MOVOU (SI), X1
+ MOVOU 16(SI), X2
+ MOVOU 32(SI), X3
+ MOVOU 48(SI), X4
+ PXOR X0, X1
+ ADDQ $64, SI // buf+=64
+ SUBQ $64, CX // len-=64
+	CMPQ CX, $64 // Less than 64 bytes left?
+ JB remain64
+
+ MOVOU r2r1kp<>+0(SB), X0
+
+loopback64:
+ MOVOA X1, X5
+ MOVOA X2, X6
+ MOVOA X3, X7
+ MOVOA X4, X8
+
+ PCLMULQDQ $0, X0, X1
+ PCLMULQDQ $0, X0, X2
+ PCLMULQDQ $0, X0, X3
+ PCLMULQDQ $0, X0, X4
+
+ // Load next early
+ MOVOU (SI), X11
+ MOVOU 16(SI), X12
+ MOVOU 32(SI), X13
+ MOVOU 48(SI), X14
+
+ PCLMULQDQ $0x11, X0, X5
+ PCLMULQDQ $0x11, X0, X6
+ PCLMULQDQ $0x11, X0, X7
+ PCLMULQDQ $0x11, X0, X8
+
+ PXOR X5, X1
+ PXOR X6, X2
+ PXOR X7, X3
+ PXOR X8, X4
+
+ PXOR X11, X1
+ PXOR X12, X2
+ PXOR X13, X3
+ PXOR X14, X4
+
+ ADDQ $0x40, DI
+ ADDQ $64, SI // buf+=64
+ SUBQ $64, CX // len-=64
+ CMPQ CX, $64 // Less than 64 bytes left?
+ JGE loopback64
+
+ // Fold result into a single register (X1)
+remain64:
+ MOVOU r4r3kp<>+0(SB), X0
+
+ MOVOA X1, X5
+ PCLMULQDQ $0, X0, X1
+ PCLMULQDQ $0x11, X0, X5
+ PXOR X5, X1
+ PXOR X2, X1
+
+ MOVOA X1, X5
+ PCLMULQDQ $0, X0, X1
+ PCLMULQDQ $0x11, X0, X5
+ PXOR X5, X1
+ PXOR X3, X1
+
+ MOVOA X1, X5
+ PCLMULQDQ $0, X0, X1
+ PCLMULQDQ $0x11, X0, X5
+ PXOR X5, X1
+ PXOR X4, X1
+
+ // More than 16 bytes left?
+ CMPQ CX, $16
+ JB finish
+
+ // Encode 16 bytes
+remain16:
+ MOVOU (SI), X10
+ MOVOA X1, X5
+ PCLMULQDQ $0, X0, X1
+ PCLMULQDQ $0x11, X0, X5
+ PXOR X5, X1
+ PXOR X10, X1
+ SUBQ $16, CX
+ ADDQ $16, SI
+ CMPQ CX, $16
+ JGE remain16
+
+finish:
+ // Fold final result into 32 bits and return it
+ PCMPEQB X3, X3
+ PCLMULQDQ $1, X1, X0
+ PSRLDQ $8, X1
+ PXOR X0, X1
+
+ MOVOA X1, X2
+ MOVQ r5kp<>+0(SB), X0
+
+ // Creates 32 bit mask. Note that we don't care about upper half.
+ PSRLQ $32, X3
+
+ PSRLDQ $4, X2
+ PAND X3, X1
+ PCLMULQDQ $0, X0, X1
+ PXOR X2, X1
+
+ MOVOU rupolykp<>+0(SB), X0
+
+ MOVOA X1, X2
+ PAND X3, X1
+ PCLMULQDQ $0x10, X0, X1
+ PAND X3, X1
+ PCLMULQDQ $0, X0, X1
+ PXOR X2, X1
+
+ // PEXTRD $1, X1, AX (SSE 4.1)
+ BYTE $0x66; BYTE $0x0f; BYTE $0x3a
+ BYTE $0x16; BYTE $0xc8; BYTE $0x01
+ MOVL AX, ret+32(FP)
+
+ RET
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go
new file mode 100644
index 0000000..5145398
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go
@@ -0,0 +1,39 @@
+//+build !appengine
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crc32
+
+// This file contains the code to call the SSE 4.2 version of the Castagnoli
+// CRC.
+
+// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for
+// SSE 4.2 support.
+func haveSSE42() bool
+
+// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE 4.2 CRC32
+// instruction.
+func castagnoliSSE42(crc uint32, p []byte) uint32
+
+var sse42 = haveSSE42()
+
+func updateCastagnoli(crc uint32, p []byte) uint32 {
+ if sse42 {
+ return castagnoliSSE42(crc, p)
+ }
+ return update(crc, castagnoliTable, p)
+}
+
+func updateIEEE(crc uint32, p []byte) uint32 {
+ // only use slicing-by-8 when input is >= 4KB
+ if len(p) >= 4096 {
+ iEEETable8Once.Do(func() {
+ iEEETable8 = makeTable8(IEEE)
+ })
+ return updateSlicingBy8(crc, iEEETable8, p)
+ }
+
+ return update(crc, IEEETable, p)
+}
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s
new file mode 100644
index 0000000..639c97c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s
@@ -0,0 +1,65 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define NOSPLIT 4
+#define RODATA 8
+
+// func castagnoliSSE42(crc uint32, p []byte) uint32
+TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
+ MOVL crc+0(FP), AX // CRC value
+ MOVL p+4(FP), SI // data pointer
+ MOVL p_len+8(FP), CX // len(p)
+
+ NOTL AX
+
+ // If there's less than 8 bytes to process, we do it byte-by-byte.
+ CMPQ CX, $8
+ JL cleanup
+
+ // Process individual bytes until the input is 8-byte aligned.
+startup:
+ MOVQ SI, BX
+ ANDQ $7, BX
+ JZ aligned
+
+ CRC32B (SI), AX
+ DECQ CX
+ INCQ SI
+ JMP startup
+
+aligned:
+ // The input is now 8-byte aligned and we can process 8-byte chunks.
+ CMPQ CX, $8
+ JL cleanup
+
+ CRC32Q (SI), AX
+ ADDQ $8, SI
+ SUBQ $8, CX
+ JMP aligned
+
+cleanup:
+ // We may have some bytes left over that we process one at a time.
+ CMPQ CX, $0
+ JE done
+
+ CRC32B (SI), AX
+ INCQ SI
+ DECQ CX
+ JMP cleanup
+
+done:
+ NOTL AX
+ MOVL AX, ret+16(FP)
+ RET
+
+// func haveSSE42() bool
+TEXT ·haveSSE42(SB), NOSPLIT, $0
+ XORQ AX, AX
+ INCL AX
+ CPUID
+ SHRQ $20, CX
+ ANDQ $1, CX
+ MOVB CX, ret+0(FP)
+ RET
+
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go
new file mode 100644
index 0000000..d6f8f85
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go
@@ -0,0 +1,28 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 arm arm64 ppc64 ppc64le appengine
+
+package crc32
+
+// This file contains the generic version of updateCastagnoli, which does
+// slicing-by-8, or uses the fallback for very small sizes.
+func updateCastagnoli(crc uint32, p []byte) uint32 {
+	// only use slicing-by-8 when input is >= 16 bytes
+ if len(p) >= 16 {
+ return updateSlicingBy8(crc, castagnoliTable8, p)
+ }
+ return update(crc, castagnoliTable, p)
+}
+
+func updateIEEE(crc uint32, p []byte) uint32 {
+	// only use slicing-by-8 when input is >= 16 bytes
+ if len(p) >= 16 {
+ iEEETable8Once.Do(func() {
+ iEEETable8 = makeTable8(IEEE)
+ })
+ return updateSlicingBy8(crc, iEEETable8, p)
+ }
+ return update(crc, IEEETable, p)
+}
diff --git a/Godeps/_workspace/src/github.com/kr/pretty/.gitignore b/Godeps/_workspace/src/github.com/kr/pretty/.gitignore
new file mode 100644
index 0000000..1f0a99f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/pretty/.gitignore
@@ -0,0 +1,4 @@
+[568].out
+_go*
+_test*
+_obj
diff --git a/Godeps/_workspace/src/github.com/kr/pretty/License b/Godeps/_workspace/src/github.com/kr/pretty/License
new file mode 100644
index 0000000..05c783c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/pretty/License
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/kr/pretty/Readme b/Godeps/_workspace/src/github.com/kr/pretty/Readme
new file mode 100644
index 0000000..c589fc6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/pretty/Readme
@@ -0,0 +1,9 @@
+package pretty
+
+ import "github.com/kr/pretty"
+
+ Package pretty provides pretty-printing for Go values.
+
+Documentation
+
+ http://godoc.org/github.com/kr/pretty
diff --git a/Godeps/_workspace/src/github.com/kr/pretty/diff.go b/Godeps/_workspace/src/github.com/kr/pretty/diff.go
new file mode 100644
index 0000000..64fac64
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/pretty/diff.go
@@ -0,0 +1,148 @@
+package pretty
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+)
+
+type sbuf []string
+
+func (s *sbuf) Write(b []byte) (int, error) {
+ *s = append(*s, string(b))
+ return len(b), nil
+}
+
+// Diff returns a slice where each element describes
+// a difference between a and b.
+func Diff(a, b interface{}) (desc []string) {
+ Fdiff((*sbuf)(&desc), a, b)
+ return desc
+}
+
+// Fdiff writes to w a description of the differences between a and b.
+func Fdiff(w io.Writer, a, b interface{}) {
+ diffWriter{w: w}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
+}
+
+type diffWriter struct {
+ w io.Writer
+ l string // label
+}
+
+func (w diffWriter) printf(f string, a ...interface{}) {
+ var l string
+ if w.l != "" {
+ l = w.l + ": "
+ }
+ fmt.Fprintf(w.w, l+f, a...)
+}
+
+func (w diffWriter) diff(av, bv reflect.Value) {
+ if !av.IsValid() && bv.IsValid() {
+ w.printf("nil != %#v", bv.Interface())
+ return
+ }
+ if av.IsValid() && !bv.IsValid() {
+ w.printf("%#v != nil", av.Interface())
+ return
+ }
+ if !av.IsValid() && !bv.IsValid() {
+ return
+ }
+
+ at := av.Type()
+ bt := bv.Type()
+ if at != bt {
+ w.printf("%v != %v", at, bt)
+ return
+ }
+
+ // numeric types, including bool
+ if at.Kind() < reflect.Array {
+ a, b := av.Interface(), bv.Interface()
+ if a != b {
+ w.printf("%#v != %#v", a, b)
+ }
+ return
+ }
+
+ switch at.Kind() {
+ case reflect.String:
+ a, b := av.Interface(), bv.Interface()
+ if a != b {
+ w.printf("%q != %q", a, b)
+ }
+ case reflect.Ptr:
+ switch {
+ case av.IsNil() && !bv.IsNil():
+ w.printf("nil != %v", bv.Interface())
+ case !av.IsNil() && bv.IsNil():
+ w.printf("%v != nil", av.Interface())
+ case !av.IsNil() && !bv.IsNil():
+ w.diff(av.Elem(), bv.Elem())
+ }
+ case reflect.Struct:
+ for i := 0; i < av.NumField(); i++ {
+ w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
+ }
+ case reflect.Map:
+ ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
+ for _, k := range ak {
+ w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
+ w.printf("%q != (missing)", av.MapIndex(k))
+ }
+ for _, k := range both {
+ w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
+ w.diff(av.MapIndex(k), bv.MapIndex(k))
+ }
+ for _, k := range bk {
+ w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
+ w.printf("(missing) != %q", bv.MapIndex(k))
+ }
+ case reflect.Interface:
+ w.diff(reflect.ValueOf(av.Interface()), reflect.ValueOf(bv.Interface()))
+ default:
+ if !reflect.DeepEqual(av.Interface(), bv.Interface()) {
+ w.printf("%# v != %# v", Formatter(av.Interface()), Formatter(bv.Interface()))
+ }
+ }
+}
+
+func (d diffWriter) relabel(name string) (d1 diffWriter) {
+ d1 = d
+ if d.l != "" && name[0] != '[' {
+ d1.l += "."
+ }
+ d1.l += name
+ return d1
+}
+
+func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
+ for _, av := range a {
+ inBoth := false
+ for _, bv := range b {
+ if reflect.DeepEqual(av.Interface(), bv.Interface()) {
+ inBoth = true
+ both = append(both, av)
+ break
+ }
+ }
+ if !inBoth {
+ ak = append(ak, av)
+ }
+ }
+ for _, bv := range b {
+ inBoth := false
+ for _, av := range a {
+ if reflect.DeepEqual(av.Interface(), bv.Interface()) {
+ inBoth = true
+ break
+ }
+ }
+ if !inBoth {
+ bk = append(bk, bv)
+ }
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/kr/pretty/formatter.go b/Godeps/_workspace/src/github.com/kr/pretty/formatter.go
new file mode 100644
index 0000000..c834d46
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/pretty/formatter.go
@@ -0,0 +1,309 @@
+package pretty
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "text/tabwriter"
+
+ "github.com/kr/text"
+)
+
+const (
+ limit = 50
+)
+
+type formatter struct {
+ x interface{}
+ force bool
+ quote bool
+}
+
+// Formatter makes a wrapper, f, that will format x as Go source with line
+// breaks and tabs. Object f responds to the "%v" formatting verb when both the
+// "#" and " " (space) flags are set, for example:
+//
+// fmt.Sprintf("%# v", Formatter(x))
+//
+// If one of these two flags is not set, or any other verb is used, f will
+// format x according to the usual rules of package fmt.
+// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
+func Formatter(x interface{}) (f fmt.Formatter) {
+ return formatter{x: x, quote: true}
+}
+
+func (fo formatter) String() string {
+ return fmt.Sprint(fo.x) // unwrap it
+}
+
+func (fo formatter) passThrough(f fmt.State, c rune) {
+ s := "%"
+ for i := 0; i < 128; i++ {
+ if f.Flag(i) {
+ s += string(i)
+ }
+ }
+ if w, ok := f.Width(); ok {
+ s += fmt.Sprintf("%d", w)
+ }
+ if p, ok := f.Precision(); ok {
+ s += fmt.Sprintf(".%d", p)
+ }
+ s += string(c)
+ fmt.Fprintf(f, s, fo.x)
+}
+
+func (fo formatter) Format(f fmt.State, c rune) {
+ if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
+ w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
+ p := &printer{tw: w, Writer: w}
+ p.printValue(reflect.ValueOf(fo.x), true, fo.quote)
+ w.Flush()
+ return
+ }
+ fo.passThrough(f, c)
+}
+
+type printer struct {
+ io.Writer
+ tw *tabwriter.Writer
+}
+
+func (p *printer) indent() *printer {
+ q := *p
+ q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
+ q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
+ return &q
+}
+
+func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
+ if showType {
+ io.WriteString(p, v.Type().String())
+ fmt.Fprintf(p, "(%#v)", x)
+ } else {
+ fmt.Fprintf(p, "%#v", x)
+ }
+}
+
+func (p *printer) printValue(v reflect.Value, showType, quote bool) {
+ switch v.Kind() {
+ case reflect.Bool:
+ p.printInline(v, v.Bool(), showType)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.printInline(v, v.Int(), showType)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p.printInline(v, v.Uint(), showType)
+ case reflect.Float32, reflect.Float64:
+ p.printInline(v, v.Float(), showType)
+ case reflect.Complex64, reflect.Complex128:
+ fmt.Fprintf(p, "%#v", v.Complex())
+ case reflect.String:
+ p.fmtString(v.String(), quote)
+ case reflect.Map:
+ t := v.Type()
+ if showType {
+ io.WriteString(p, t.String())
+ }
+ writeByte(p, '{')
+ if nonzero(v) {
+ expand := !canInline(v.Type())
+ pp := p
+ if expand {
+ writeByte(p, '\n')
+ pp = p.indent()
+ }
+ keys := v.MapKeys()
+ for i := 0; i < v.Len(); i++ {
+ showTypeInStruct := true
+ k := keys[i]
+ mv := v.MapIndex(k)
+ pp.printValue(k, false, true)
+ writeByte(pp, ':')
+ if expand {
+ writeByte(pp, '\t')
+ }
+ showTypeInStruct = t.Elem().Kind() == reflect.Interface
+ pp.printValue(mv, showTypeInStruct, true)
+ if expand {
+ io.WriteString(pp, ",\n")
+ } else if i < v.Len()-1 {
+ io.WriteString(pp, ", ")
+ }
+ }
+ if expand {
+ pp.tw.Flush()
+ }
+ }
+ writeByte(p, '}')
+ case reflect.Struct:
+ t := v.Type()
+ if showType {
+ io.WriteString(p, t.String())
+ }
+ writeByte(p, '{')
+ if nonzero(v) {
+ expand := !canInline(v.Type())
+ pp := p
+ if expand {
+ writeByte(p, '\n')
+ pp = p.indent()
+ }
+ for i := 0; i < v.NumField(); i++ {
+ showTypeInStruct := true
+ if f := t.Field(i); f.Name != "" {
+ io.WriteString(pp, f.Name)
+ writeByte(pp, ':')
+ if expand {
+ writeByte(pp, '\t')
+ }
+ showTypeInStruct = labelType(f.Type)
+ }
+ pp.printValue(getField(v, i), showTypeInStruct, true)
+ if expand {
+ io.WriteString(pp, ",\n")
+ } else if i < v.NumField()-1 {
+ io.WriteString(pp, ", ")
+ }
+ }
+ if expand {
+ pp.tw.Flush()
+ }
+ }
+ writeByte(p, '}')
+ case reflect.Interface:
+ switch e := v.Elem(); {
+ case e.Kind() == reflect.Invalid:
+ io.WriteString(p, "nil")
+ case e.IsValid():
+ p.printValue(e, showType, true)
+ default:
+ io.WriteString(p, v.Type().String())
+ io.WriteString(p, "(nil)")
+ }
+ case reflect.Array, reflect.Slice:
+ t := v.Type()
+ if showType {
+ io.WriteString(p, t.String())
+ }
+ if v.Kind() == reflect.Slice && v.IsNil() && showType {
+ io.WriteString(p, "(nil)")
+ break
+ }
+ if v.Kind() == reflect.Slice && v.IsNil() {
+ io.WriteString(p, "nil")
+ break
+ }
+ writeByte(p, '{')
+ expand := !canInline(v.Type())
+ pp := p
+ if expand {
+ writeByte(p, '\n')
+ pp = p.indent()
+ }
+ for i := 0; i < v.Len(); i++ {
+ showTypeInSlice := t.Elem().Kind() == reflect.Interface
+ pp.printValue(v.Index(i), showTypeInSlice, true)
+ if expand {
+ io.WriteString(pp, ",\n")
+ } else if i < v.Len()-1 {
+ io.WriteString(pp, ", ")
+ }
+ }
+ if expand {
+ pp.tw.Flush()
+ }
+ writeByte(p, '}')
+ case reflect.Ptr:
+ e := v.Elem()
+ if !e.IsValid() {
+ writeByte(p, '(')
+ io.WriteString(p, v.Type().String())
+ io.WriteString(p, ")(nil)")
+ } else {
+ writeByte(p, '&')
+ p.printValue(e, true, true)
+ }
+ case reflect.Chan:
+ x := v.Pointer()
+ if showType {
+ writeByte(p, '(')
+ io.WriteString(p, v.Type().String())
+ fmt.Fprintf(p, ")(%#v)", x)
+ } else {
+ fmt.Fprintf(p, "%#v", x)
+ }
+ case reflect.Func:
+ io.WriteString(p, v.Type().String())
+ io.WriteString(p, " {...}")
+ case reflect.UnsafePointer:
+ p.printInline(v, v.Pointer(), showType)
+ case reflect.Invalid:
+ io.WriteString(p, "nil")
+ }
+}
+
+func canInline(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Map:
+ return !canExpand(t.Elem())
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if canExpand(t.Field(i).Type) {
+ return false
+ }
+ }
+ return true
+ case reflect.Interface:
+ return false
+ case reflect.Array, reflect.Slice:
+ return !canExpand(t.Elem())
+ case reflect.Ptr:
+ return false
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ return false
+ }
+ return true
+}
+
+func canExpand(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Map, reflect.Struct,
+ reflect.Interface, reflect.Array, reflect.Slice,
+ reflect.Ptr:
+ return true
+ }
+ return false
+}
+
+func labelType(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Interface, reflect.Struct:
+ return true
+ }
+ return false
+}
+
+func (p *printer) fmtString(s string, quote bool) {
+ if quote {
+ s = strconv.Quote(s)
+ }
+ io.WriteString(p, s)
+}
+
+func tryDeepEqual(a, b interface{}) bool {
+ defer func() { recover() }()
+ return reflect.DeepEqual(a, b)
+}
+
+func writeByte(w io.Writer, b byte) {
+ w.Write([]byte{b})
+}
+
+func getField(v reflect.Value, i int) reflect.Value {
+ val := v.Field(i)
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ val = val.Elem()
+ }
+ return val
+}
diff --git a/Godeps/_workspace/src/github.com/kr/pretty/pretty.go b/Godeps/_workspace/src/github.com/kr/pretty/pretty.go
new file mode 100644
index 0000000..d3df868
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/pretty/pretty.go
@@ -0,0 +1,98 @@
+// Package pretty provides pretty-printing for Go values. This is
+// useful during debugging, to avoid wrapping long output lines in
+// the terminal.
+//
+// It provides a function, Formatter, that can be used with any
+// function that accepts a format string. It also provides
+// convenience wrappers for functions in packages fmt and log.
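+//
+// As a small sketch of typical use (the struct type here is only
+// illustrative):
+//
+//	type point struct{ X, Y int }
+//	pretty.Println(point{1, 2})
+//	pretty.Printf("%# v\n", pretty.Formatter(point{1, 2}))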
+package pretty
+
+import (
+ "fmt"
+ "io"
+ "log"
+)
+
+// Errorf is a convenience wrapper for fmt.Errorf.
+//
+// Calling Errorf(f, x, y) is equivalent to
+// fmt.Errorf(f, Formatter(x), Formatter(y)).
+func Errorf(format string, a ...interface{}) error {
+ return fmt.Errorf(format, wrap(a, false)...)
+}
+
+// Fprintf is a convenience wrapper for fmt.Fprintf.
+//
+// Calling Fprintf(w, f, x, y) is equivalent to
+// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
+ return fmt.Fprintf(w, format, wrap(a, false)...)
+}
+
+// Log is a convenience wrapper for log.Print.
+//
+// Calling Log(x, y) is equivalent to
+// log.Print(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Log(a ...interface{}) {
+ log.Print(wrap(a, true)...)
+}
+
+// Logf is a convenience wrapper for log.Printf.
+//
+// Calling Logf(f, x, y) is equivalent to
+// log.Printf(f, Formatter(x), Formatter(y)).
+func Logf(format string, a ...interface{}) {
+ log.Printf(format, wrap(a, false)...)
+}
+
+// Logln is a convenience wrapper for log.Println.
+//
+// Calling Logln(x, y) is equivalent to
+// log.Println(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Logln(a ...interface{}) {
+ log.Println(wrap(a, true)...)
+}
+
+// Print pretty-prints its operands and writes to standard output.
+//
+// Calling Print(x, y) is equivalent to
+// fmt.Print(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Print(a ...interface{}) (n int, errno error) {
+ return fmt.Print(wrap(a, true)...)
+}
+
+// Printf is a convenience wrapper for fmt.Printf.
+//
+// Calling Printf(f, x, y) is equivalent to
+// fmt.Printf(f, Formatter(x), Formatter(y)).
+func Printf(format string, a ...interface{}) (n int, errno error) {
+ return fmt.Printf(format, wrap(a, false)...)
+}
+
+// Println pretty-prints its operands and writes to standard output.
+//
+// Calling Println(x, y) is equivalent to
+// fmt.Println(Formatter(x), Formatter(y)), but each operand is
+// formatted with "%# v".
+func Println(a ...interface{}) (n int, errno error) {
+ return fmt.Println(wrap(a, true)...)
+}
+
+// Sprintf is a convenience wrapper for fmt.Sprintf.
+//
+// Calling Sprintf(f, x, y) is equivalent to
+// fmt.Sprintf(f, Formatter(x), Formatter(y)).
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, wrap(a, false)...)
+}
+
+func wrap(a []interface{}, force bool) []interface{} {
+ w := make([]interface{}, len(a))
+ for i, x := range a {
+ w[i] = formatter{x: x, force: force}
+ }
+ return w
+}
diff --git a/Godeps/_workspace/src/github.com/kr/pretty/zero.go b/Godeps/_workspace/src/github.com/kr/pretty/zero.go
new file mode 100644
index 0000000..abb5b6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/pretty/zero.go
@@ -0,0 +1,41 @@
+package pretty
+
+import (
+ "reflect"
+)
+
+func nonzero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() != 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() != 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() != complex(0, 0)
+ case reflect.String:
+ return v.String() != ""
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if nonzero(getField(v, i)) {
+ return true
+ }
+ }
+ return false
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if nonzero(v.Index(i)) {
+ return true
+ }
+ }
+ return false
+ case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
+ return !v.IsNil()
+ case reflect.UnsafePointer:
+ return v.Pointer() != 0
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/kr/text/License b/Godeps/_workspace/src/github.com/kr/text/License
new file mode 100644
index 0000000..480a328
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/License
@@ -0,0 +1,19 @@
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/kr/text/Readme b/Godeps/_workspace/src/github.com/kr/text/Readme
new file mode 100644
index 0000000..7e6e7c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/Readme
@@ -0,0 +1,3 @@
+This is a Go package for manipulating paragraphs of text.
+
+See http://go.pkgdoc.org/github.com/kr/text for full documentation.
diff --git a/Godeps/_workspace/src/github.com/kr/text/colwriter/Readme b/Godeps/_workspace/src/github.com/kr/text/colwriter/Readme
new file mode 100644
index 0000000..1c1f4e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/colwriter/Readme
@@ -0,0 +1,5 @@
+Package colwriter provides a write filter that formats
+input lines in multiple columns.
+
+The package is a straightforward translation from
+/src/cmd/draw/mc.c in Plan 9 from User Space.
diff --git a/Godeps/_workspace/src/github.com/kr/text/colwriter/column.go b/Godeps/_workspace/src/github.com/kr/text/colwriter/column.go
new file mode 100644
index 0000000..7302ce9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/colwriter/column.go
@@ -0,0 +1,147 @@
+// Package colwriter provides a write filter that formats
+// input lines in multiple columns.
+//
+// The package is a straightforward translation from
+// /src/cmd/draw/mc.c in Plan 9 from User Space.
+package colwriter
+
+import (
+ "bytes"
+ "io"
+ "unicode/utf8"
+)
+
+const (
+ tab = 4
+)
+
+const (
+ // Print each input line ending in a colon ':' separately.
+ BreakOnColon uint = 1 << iota
+)
+
+// A Writer is a filter that arranges input lines in as many columns as will
+// fit in its width. Tab '\t' chars in the input are translated to sequences
+// of spaces ending at multiples of 4 positions.
+//
+// If BreakOnColon is set, each input line ending in a colon ':' is written
+// separately.
+//
+// The Writer assumes that all Unicode code points have the same width; this
+// may not be true in some fonts.
+type Writer struct {
+ w io.Writer
+ buf []byte
+ width int
+ flag uint
+}
+
+// NewWriter allocates and initializes a new Writer writing to w.
+// Parameter width controls the total number of characters on each line
+// across all columns.
+func NewWriter(w io.Writer, width int, flag uint) *Writer {
+ return &Writer{
+ w: w,
+ width: width,
+ flag: flag,
+ }
+}
+
+// Write writes p to the writer w. The only errors returned are ones
+// encountered while writing to the underlying output stream.
+func (w *Writer) Write(p []byte) (n int, err error) {
+ var linelen int
+ var lastWasColon bool
+ for i, c := range p {
+ w.buf = append(w.buf, c)
+ linelen++
+ if c == '\t' {
+ w.buf[len(w.buf)-1] = ' '
+ for linelen%tab != 0 {
+ w.buf = append(w.buf, ' ')
+ linelen++
+ }
+ }
+ if w.flag&BreakOnColon != 0 && c == ':' {
+ lastWasColon = true
+ } else if lastWasColon {
+ if c == '\n' {
+ pos := bytes.LastIndex(w.buf[:len(w.buf)-1], []byte{'\n'})
+ if pos < 0 {
+ pos = 0
+ }
+ line := w.buf[pos:]
+ w.buf = w.buf[:pos]
+ if err = w.columnate(); err != nil {
+ if len(line) < i {
+ return i - len(line), err
+ }
+ return 0, err
+ }
+ if n, err := w.w.Write(line); err != nil {
+ if r := len(line) - n; r < i {
+ return i - r, err
+ }
+ return 0, err
+ }
+ }
+ lastWasColon = false
+ }
+ if c == '\n' {
+ linelen = 0
+ }
+ }
+ return len(p), nil
+}
+
+// Flush should be called after the last call to Write to ensure that any data
+// buffered in the Writer is written to output.
+func (w *Writer) Flush() error {
+ return w.columnate()
+}
+
+func (w *Writer) columnate() error {
+ words := bytes.Split(w.buf, []byte{'\n'})
+ w.buf = nil
+ if len(words[len(words)-1]) == 0 {
+ words = words[:len(words)-1]
+ }
+ maxwidth := 0
+ for _, wd := range words {
+ if n := utf8.RuneCount(wd); n > maxwidth {
+ maxwidth = n
+ }
+ }
+ maxwidth++ // space char
+ wordsPerLine := w.width / maxwidth
+ if wordsPerLine <= 0 {
+ wordsPerLine = 1
+ }
+ nlines := (len(words) + wordsPerLine - 1) / wordsPerLine
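+	// Lay the words out column-major: output line i holds words i, i+nlines,
+	// i+2*nlines, ..., each padded with spaces to the next column boundary.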
+ for i := 0; i < nlines; i++ {
+ col := 0
+ endcol := 0
+ for j := i; j < len(words); j += nlines {
+ endcol += maxwidth
+ _, err := w.w.Write(words[j])
+ if err != nil {
+ return err
+ }
+ col += utf8.RuneCount(words[j])
+ if j+nlines < len(words) {
+ for col < endcol {
+ _, err := w.w.Write([]byte{' '})
+ if err != nil {
+ return err
+ }
+ col++
+ }
+ }
+ }
+ _, err := w.w.Write([]byte{'\n'})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/kr/text/doc.go b/Godeps/_workspace/src/github.com/kr/text/doc.go
new file mode 100644
index 0000000..cf4c198
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/doc.go
@@ -0,0 +1,3 @@
+// Package text provides rudimentary functions for manipulating text in
+// paragraphs.
+package text
diff --git a/Godeps/_workspace/src/github.com/kr/text/indent.go b/Godeps/_workspace/src/github.com/kr/text/indent.go
new file mode 100644
index 0000000..4ebac45
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/indent.go
@@ -0,0 +1,74 @@
+package text
+
+import (
+ "io"
+)
+
+// Indent inserts prefix at the beginning of each non-empty line of s. The
+// end-of-line marker is NL.
+func Indent(s, prefix string) string {
+ return string(IndentBytes([]byte(s), []byte(prefix)))
+}
+
+// IndentBytes inserts prefix at the beginning of each non-empty line of b.
+// The end-of-line marker is NL.
+func IndentBytes(b, prefix []byte) []byte {
+ var res []byte
+ bol := true
+ for _, c := range b {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// indentWriter indents each line of its input.
+type indentWriter struct {
+ w io.Writer
+ bol bool
+ pre [][]byte
+ sel int
+ off int
+}
+
+// NewIndentWriter makes a new write filter that indents the input
+// lines. Each line is prefixed in order with the corresponding
+// element of pre. If there are more lines than elements, the last
+// element of pre is repeated for each subsequent line.
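+//
+// A small sketch (assuming os.Stdout as the destination):
+//
+//	w := NewIndentWriter(os.Stdout, []byte("> "), []byte("  "))
+//	io.WriteString(w, "first\nsecond\nthird\n")
+//	// > first
+//	//   second
+//	//   third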
+func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
+ return &indentWriter{
+ w: w,
+ pre: pre,
+ bol: true,
+ }
+}
+
+// The only errors returned are from the underlying writer.
+func (w *indentWriter) Write(p []byte) (n int, err error) {
+ for _, c := range p {
+ if w.bol {
+ var i int
+ i, err = w.w.Write(w.pre[w.sel][w.off:])
+ w.off += i
+ if err != nil {
+ return n, err
+ }
+ }
+ _, err = w.w.Write([]byte{c})
+ if err != nil {
+ return n, err
+ }
+ n++
+ w.bol = c == '\n'
+ if w.bol {
+ w.off = 0
+ if w.sel < len(w.pre)-1 {
+ w.sel++
+ }
+ }
+ }
+ return n, nil
+}
diff --git a/Godeps/_workspace/src/github.com/kr/text/mc/Readme b/Godeps/_workspace/src/github.com/kr/text/mc/Readme
new file mode 100644
index 0000000..519ddc0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/mc/Readme
@@ -0,0 +1,9 @@
+Command mc prints in multiple columns.
+
+ Usage: mc [-] [-N] [file...]
+
+Mc splits the input into as many columns as will fit in N
+print positions. If the output is a tty, the default N is
+the number of characters in a terminal line; otherwise the
+default N is 80. Under option - each input line ending in
+a colon ':' is printed separately.
diff --git a/Godeps/_workspace/src/github.com/kr/text/mc/mc.go b/Godeps/_workspace/src/github.com/kr/text/mc/mc.go
new file mode 100644
index 0000000..00169a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/mc/mc.go
@@ -0,0 +1,62 @@
+// Command mc prints in multiple columns.
+//
+// Usage: mc [-] [-N] [file...]
+//
+// Mc splits the input into as many columns as will fit in N
+// print positions. If the output is a tty, the default N is
+// the number of characters in a terminal line; otherwise the
+// default N is 80. Under option - each input line ending in
+// a colon ':' is printed separately.
+package main
+
+import (
+ "github.com/kr/pty"
+ "github.com/kr/text/colwriter"
+ "io"
+ "log"
+ "os"
+ "strconv"
+)
+
+func main() {
+ var width int
+ var flag uint
+ args := os.Args[1:]
+ for len(args) > 0 && len(args[0]) > 0 && args[0][0] == '-' {
+ if len(args[0]) > 1 {
+ width, _ = strconv.Atoi(args[0][1:])
+ } else {
+ flag |= colwriter.BreakOnColon
+ }
+ args = args[1:]
+ }
+ if width < 1 {
+ _, width, _ = pty.Getsize(os.Stdout)
+ }
+ if width < 1 {
+ width = 80
+ }
+
+ w := colwriter.NewWriter(os.Stdout, width, flag)
+ if len(args) > 0 {
+ for _, s := range args {
+ if f, err := os.Open(s); err == nil {
+ copyin(w, f)
+ f.Close()
+ } else {
+ log.Println(err)
+ }
+ }
+ } else {
+ copyin(w, os.Stdin)
+ }
+}
+
+func copyin(w *colwriter.Writer, r io.Reader) {
+ if _, err := io.Copy(w, r); err != nil {
+ log.Println(err)
+ }
+ if err := w.Flush(); err != nil {
+ log.Println(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/kr/text/wrap.go b/Godeps/_workspace/src/github.com/kr/text/wrap.go
new file mode 100644
index 0000000..ca88565
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/kr/text/wrap.go
@@ -0,0 +1,86 @@
+package text
+
+import (
+ "bytes"
+ "math"
+)
+
+var (
+ nl = []byte{'\n'}
+ sp = []byte{' '}
+)
+
+const defaultPenalty = 1e5
+
+// Wrap wraps s into a paragraph of lines of length lim, with minimal
+// raggedness.
+func Wrap(s string, lim int) string {
+ return string(WrapBytes([]byte(s), lim))
+}
+
+// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
+// raggedness.
+func WrapBytes(b []byte, lim int) []byte {
+ words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
+ var lines [][]byte
+ for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
+ lines = append(lines, bytes.Join(line, sp))
+ }
+ return bytes.Join(lines, nl)
+}
+
+// WrapWords is the low-level line-breaking algorithm, useful if you need more
+// control over the details of the text wrapping process. For most uses, either
+// Wrap or WrapBytes will be sufficient and more convenient.
+//
+// WrapWords splits a list of words into lines with minimal "raggedness",
+// treating each byte as one unit, accounting for spc units between adjacent
+// words on each line, and attempting to limit lines to lim units. Raggedness
+// is the total error over all lines, where error is the square of the
+// difference of the length of the line and lim. Too-long lines (which only
+// happen when a single word is longer than lim units) have pen penalty units
+// added to the error.
+func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
+ n := len(words)
+
+ length := make([][]int, n)
+ for i := 0; i < n; i++ {
+ length[i] = make([]int, n)
+ length[i][i] = len(words[i])
+ for j := i + 1; j < n; j++ {
+ length[i][j] = length[i][j-1] + spc + len(words[j])
+ }
+ }
+
+ nbrk := make([]int, n)
+ cost := make([]int, n)
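+	// cost[i] is the minimal raggedness of wrapping words[i:], and nbrk[i]
+	// is the index of the first word on the line after the one starting at
+	// words[i]; both are filled in right to left below.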
+ for i := range cost {
+ cost[i] = math.MaxInt32
+ }
+ for i := n - 1; i >= 0; i-- {
+ if length[i][n-1] <= lim {
+ cost[i] = 0
+ nbrk[i] = n
+ } else {
+ for j := i + 1; j < n; j++ {
+ d := lim - length[i][j-1]
+ c := d*d + cost[j]
+ if length[i][j-1] > lim {
+ c += pen // too-long lines get a worse penalty
+ }
+ if c < cost[i] {
+ cost[i] = c
+ nbrk[i] = j
+ }
+ }
+ }
+ }
+
+ var lines [][][]byte
+ i := 0
+ for i < n {
+ lines = append(lines, words[i:nbrk[i]])
+ i = nbrk[i]
+ }
+ return lines
+}
diff --git a/Godeps/_workspace/src/github.com/manucorporat/sse/.travis.yml b/Godeps/_workspace/src/github.com/manucorporat/sse/.travis.yml
new file mode 100644
index 0000000..3d33833
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/manucorporat/sse/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+sudo: false
+go:
+ - 1.3
+ - 1.4
+ - tip
diff --git a/Godeps/_workspace/src/github.com/manucorporat/sse/LICENSE b/Godeps/_workspace/src/github.com/manucorporat/sse/LICENSE
new file mode 100644
index 0000000..1ff7f37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/manucorporat/sse/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Manuel Martínez-Almeida
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/manucorporat/sse/README.md b/Godeps/_workspace/src/github.com/manucorporat/sse/README.md
new file mode 100644
index 0000000..4e1cf0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/manucorporat/sse/README.md
@@ -0,0 +1,54 @@
+# Server-Sent Events [![GoDoc](https://godoc.org/github.com/manucorporat/sse?status.svg)](https://godoc.org/github.com/manucorporat/sse) [![Build Status](https://travis-ci.org/manucorporat/sse.svg)](https://travis-ci.org/manucorporat/sse)
+
+Server-sent events (SSE) is a technology where a browser receives automatic updates from a server over an HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5 by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/).
+
+- [Real-world demonstration using Gin](http://sse.getgin.io/)
+- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/)
+- [Browser support](http://caniuse.com/#feat=eventsource)
+
+## Sample code
+
+```go
+import "github.com/manucorporat/sse"
+
+func httpHandler(w http.ResponseWriter, req *http.Request) {
+ // data can be a primitive like a string, an integer or a float
+ sse.Encode(w, sse.Event{
+ Event: "message",
+ Data: "some data\nmore data",
+ })
+
+ // also a complex type, like a map, a struct or a slice
+ sse.Encode(w, sse.Event{
+ Id: "124",
+ Event: "message",
+ Data: map[string]interface{}{
+ "user": "manu",
+ "date": time.Now().Unix(),
+ "content": "hi!",
+ },
+ })
+}
+```
+```
+event:message
+data:some data
+data:more data
+
+id:124
+event:message
+data:{"content":"hi!","date":1431540810,"user":"manu"}
+
+```
+
+## Content-Type
+
+```go
+fmt.Println(sse.ContentType)
+```
+```
+text/event-stream
+```
+
+## Decoding support
+
+There is a client-side implementation of SSE coming soon.
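+
+In the meantime, the package already exposes a `Decode` helper (see `sse-decoder.go`); a minimal sketch of reading events back from a stream:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/manucorporat/sse"
+)
+
+func main() {
+	stream := "event: message\ndata: some data\ndata: more data\n\n"
+	events, err := sse.Decode(strings.NewReader(stream))
+	if err != nil {
+		panic(err)
+	}
+	for _, ev := range events {
+		fmt.Printf("%s: %q\n", ev.Event, ev.Data)
+	}
+}
+```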
diff --git a/Godeps/_workspace/src/github.com/manucorporat/sse/sse-decoder.go b/Godeps/_workspace/src/github.com/manucorporat/sse/sse-decoder.go
new file mode 100644
index 0000000..fd49b9c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/manucorporat/sse/sse-decoder.go
@@ -0,0 +1,116 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package sse
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"strconv"
+)
+
+type decoder struct {
+ events []Event
+}
+
+func Decode(r io.Reader) ([]Event, error) {
+ var dec decoder
+ return dec.decode(r)
+}
+
+func (d *decoder) dispatchEvent(event Event, data string) {
+ dataLength := len(data)
+ if dataLength > 0 {
+ //If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer.
+ data = data[:dataLength-1]
+ dataLength--
+ }
+ if dataLength == 0 && event.Event == "" {
+ return
+ }
+ if event.Event == "" {
+ event.Event = "message"
+ }
+ event.Data = data
+ d.events = append(d.events, event)
+}
+
+func (d *decoder) decode(r io.Reader) ([]Event, error) {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ var currentEvent Event
+	dataBuffer := new(bytes.Buffer)
+ // TODO (and unit tests)
+ // Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair,
+ // a single U+000A LINE FEED (LF) character,
+ // or a single U+000D CARRIAGE RETURN (CR) character.
+ lines := bytes.Split(buf, []byte{'\n'})
+ for _, line := range lines {
+ if len(line) == 0 {
+			// If the line is empty (a blank line), dispatch the event.
+ d.dispatchEvent(currentEvent, dataBuffer.String())
+
+ // reset current event and data buffer
+ currentEvent = Event{}
+ dataBuffer.Reset()
+ continue
+ }
+ if line[0] == byte(':') {
+ // If the line starts with a U+003A COLON character (:), ignore the line.
+ continue
+ }
+
+ var field, value []byte
+ colonIndex := bytes.IndexRune(line, ':')
+ if colonIndex != -1 {
+			// If the line contains a U+003A COLON character (:),
+ // Collect the characters on the line before the first U+003A COLON character (:),
+ // and let field be that string.
+ field = line[:colonIndex]
+ // Collect the characters on the line after the first U+003A COLON character (:),
+ // and let value be that string.
+ value = line[colonIndex+1:]
+ // If value starts with a single U+0020 SPACE character, remove it from value.
+ if len(value) > 0 && value[0] == ' ' {
+ value = value[1:]
+ }
+ } else {
+			// Otherwise, the string is not empty but does not contain a U+003A COLON character (:).
+ // Use the whole line as the field name, and the empty string as the field value.
+ field = line
+ value = []byte{}
+ }
+ // The steps to process the field given a field name and a field value depend on the field name,
+ // as given in the following list. Field names must be compared literally,
+ // with no case folding performed.
+ switch string(field) {
+ case "event":
+ // Set the event name buffer to field value.
+ currentEvent.Event = string(value)
+ case "id":
+ // Set the event stream's last event ID to the field value.
+ currentEvent.Id = string(value)
+ case "retry":
+ // If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9),
+ // then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer.
+ // Otherwise, ignore the field.
+ currentEvent.Id = string(value)
+ case "data":
+ // Append the field value to the data buffer,
+ dataBuffer.Write(value)
+ // then append a single U+000A LINE FEED (LF) character to the data buffer.
+ dataBuffer.WriteString("\n")
+ default:
+ //Otherwise. The field is ignored.
+ continue
+ }
+ }
+ // Once the end of the file is reached, the user agent must dispatch the event one final time.
+ d.dispatchEvent(currentEvent, dataBuffer.String())
+
+ return d.events, nil
+}
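
For reference, a minimal sketch of how this decoder is driven; the stream literal and the printed output format are illustrative only:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/manucorporat/sse"
)

func main() {
	// Two events separated by blank lines, per the framing rules above.
	stream := "id: 1\nevent: update\ndata: hello\n\ndata: world\n\n"
	events, err := sse.Decode(strings.NewReader(stream))
	if err != nil {
		panic(err)
	}
	for _, ev := range events {
		// The second event falls back to the default "message" type.
		fmt.Printf("id=%q event=%q data=%v\n", ev.Id, ev.Event, ev.Data)
	}
}
```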
diff --git a/Godeps/_workspace/src/github.com/manucorporat/sse/sse-encoder.go b/Godeps/_workspace/src/github.com/manucorporat/sse/sse-encoder.go
new file mode 100644
index 0000000..19a385e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/manucorporat/sse/sse-encoder.go
@@ -0,0 +1,106 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package sse
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Server-Sent Events
+// W3C Working Draft 29 October 2009
+// http://www.w3.org/TR/2009/WD-eventsource-20091029/
+
+const ContentType = "text/event-stream"
+
+var contentType = []string{ContentType}
+var noCache = []string{"no-cache"}
+
+var fieldReplacer = strings.NewReplacer(
+ "\n", "\\n",
+ "\r", "\\r")
+
+var dataReplacer = strings.NewReplacer(
+ "\n", "\ndata:",
+ "\r", "\\r")
+
+type Event struct {
+ Event string
+ Id string
+ Retry uint
+ Data interface{}
+}
+
+func Encode(writer io.Writer, event Event) error {
+ w := checkWriter(writer)
+ writeId(w, event.Id)
+ writeEvent(w, event.Event)
+ writeRetry(w, event.Retry)
+ return writeData(w, event.Data)
+}
+
+func writeId(w stringWriter, id string) {
+ if len(id) > 0 {
+ w.WriteString("id:")
+ fieldReplacer.WriteString(w, id)
+ w.WriteString("\n")
+ }
+}
+
+func writeEvent(w stringWriter, event string) {
+ if len(event) > 0 {
+ w.WriteString("event:")
+ fieldReplacer.WriteString(w, event)
+ w.WriteString("\n")
+ }
+}
+
+func writeRetry(w stringWriter, retry uint) {
+ if retry > 0 {
+ w.WriteString("retry:")
+ w.WriteString(strconv.FormatUint(uint64(retry), 10))
+ w.WriteString("\n")
+ }
+}
+
+func writeData(w stringWriter, data interface{}) error {
+ w.WriteString("data:")
+ switch kindOfData(data) {
+ case reflect.Struct, reflect.Slice, reflect.Map:
+ err := json.NewEncoder(w).Encode(data)
+ if err != nil {
+ return err
+ }
+ w.WriteString("\n")
+ default:
+ dataReplacer.WriteString(w, fmt.Sprint(data))
+ w.WriteString("\n\n")
+ }
+ return nil
+}
+
+func (r Event) Render(w http.ResponseWriter) error {
+ header := w.Header()
+ header["Content-Type"] = contentType
+
+ if _, exist := header["Cache-Control"]; !exist {
+ header["Cache-Control"] = noCache
+ }
+ return Encode(w, r)
+}
+
+func kindOfData(data interface{}) reflect.Kind {
+ value := reflect.ValueOf(data)
+ valueType := value.Kind()
+ if valueType == reflect.Ptr {
+ valueType = value.Elem().Kind()
+ }
+ return valueType
+}
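
The encoder side is symmetric; a short usage sketch (the event values are made up):

```go
package main

import (
	"os"

	"github.com/manucorporat/sse"
)

func main() {
	// Struct, slice, and map payloads are JSON-encoded; anything else is
	// written as plain text with newlines escaped.
	err := sse.Encode(os.Stdout, sse.Event{
		Id:    "42",
		Event: "tick",
		Data:  map[string]string{"status": "ok"},
	})
	if err != nil {
		panic(err)
	}
}
```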
diff --git a/Godeps/_workspace/src/github.com/manucorporat/sse/writer.go b/Godeps/_workspace/src/github.com/manucorporat/sse/writer.go
new file mode 100644
index 0000000..6f9806c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/manucorporat/sse/writer.go
@@ -0,0 +1,24 @@
+package sse
+
+import "io"
+
+type stringWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+}
+
+type stringWrapper struct {
+ io.Writer
+}
+
+func (w stringWrapper) WriteString(str string) (int, error) {
+ return w.Writer.Write([]byte(str))
+}
+
+func checkWriter(writer io.Writer) stringWriter {
+ if w, ok := writer.(stringWriter); ok {
+ return w
+ }
+ return stringWrapper{writer}
+}
diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/LICENSE b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 0000000..13f15df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2013 Matt T. Proud
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 0000000..66d9b54
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ headerBuf := make([]byte, binary.MaxVarintLen32)
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
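
The length prefix is an ordinary unsigned varint, so the framing can be sanity-checked independently of protobuf; a quick sketch using only the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A 300-byte message gets a two-byte varint prefix: 0xac 0x02.
	buf := make([]byte, binary.MaxVarintLen32)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // prints: ac 02
}
```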
diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 0000000..c318385
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 0000000..4b76ea9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, binary.MaxVarintLen32)
+ encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
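
A round-trip sketch of WriteDelimited and ReadDelimited; `pb.Envelope` and its `Payload` field stand in for a message type generated from your own .proto file and are purely hypothetical:

```go
package main

import (
	"bytes"
	"log"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"

	pb "example.com/yourapp/proto" // hypothetical package with a generated message type
)

func main() {
	var buf bytes.Buffer
	// Write two length-delimited records back to back.
	for _, s := range []string{"first", "second"} {
		if _, err := pbutil.WriteDelimited(&buf, &pb.Envelope{Payload: s}); err != nil {
			log.Fatal(err)
		}
	}
	// Read them back until the stream is exhausted (io.EOF).
	for {
		var msg pb.Envelope
		if _, err := pbutil.ReadDelimited(&buf, &msg); err != nil {
			break
		}
		log.Printf("read: %v", &msg)
	}
}
```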
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/.gitignore b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/.gitignore
new file mode 100644
index 0000000..8000dd9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/.gitignore
@@ -0,0 +1 @@
+.vagrant
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
new file mode 100644
index 0000000..f1880c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
@@ -0,0 +1,60 @@
+## How to Contribute ##
+
+We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute.
+
+### Reporting issues ###
+
+We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and pull requests.
+
+If you find a bug:
+
+* Use the GitHub issue search to check whether the bug has already been reported.
+* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
+* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
+
+### Pull requests ###
+
+We welcome bug fixes, improvements, and new features. Before making significant changes, please open an issue and ask first, so you do not risk duplicating efforts or spending time on something that may be out of scope. For minor items, just open a pull request.
+
+[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
+
+ $ git clone git@github.com:<username>/go-zfs.git
+ $ cd go-zfs
+ $ git remote add upstream https://github.com/mistifyio/go-zfs.git
+
+If you need to pull new changes committed upstream:
+
+ $ git checkout master
+ $ git fetch upstream
+ $ git merge upstream/master
+
+Don't work directly on master, as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+
+ $ git checkout -b <branch-name>
+
+Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+
+ $ git commit -m "Issue # - "
+
+Push your feature branch to your fork.
+
+ $ git push origin <branch-name>
+
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+
+* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
+* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+
+**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
+
+### Go Tools ###
+For consistency and to catch minor issues in all Go code, please run the following:
+* goimports
+* go vet
+* golint
+* errcheck
+
+Many editors can execute the above on save.
+
+----
+Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/LICENSE b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/LICENSE
new file mode 100644
index 0000000..f4c265c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (c) 2014, OmniTI Computer Consulting, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/README.md b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/README.md
new file mode 100644
index 0000000..2515e58
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/README.md
@@ -0,0 +1,54 @@
+# Go Wrapper for ZFS #
+
+Simple wrappers for ZFS command line tools.
+
+[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs)
+
+## Requirements ##
+
+You need a working ZFS setup. To use on Ubuntu 14.04, set up ZFS:
+
+ sudo apt-get install python-software-properties
+ sudo apt-add-repository ppa:zfs-native/stable
+ sudo apt-get update
+ sudo apt-get install ubuntu-zfs libzfs-dev
+
+Developed using Go 1.3, but currently there isn't anything 1.3-specific. Don't use the Ubuntu packages for Go; install from http://golang.org/doc/install instead.
+
+Generally you need root privileges to use anything zfs related.
+
+## Status ##
+
+This has only been tested on Ubuntu 14.04.
+
+In the future, we hope to work directly with libzfs.
+
+# Hacking #
+
+The tests have decent examples for most functions.
+
+```go
+// assuming a zpool named test
+// error handling omitted; ok(t, err) is the test helper used in this repo
+
+f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ok(t, err)
+
+s, err := f.Snapshot("test", false)
+ok(t, err)
+
+// snapshot is named "test/snapshot-test@test"
+
+c, err := s.Clone("test/clone-test", nil)
+ok(t, err)
+
+err = c.Destroy(zfs.DestroyDefault)
+err = s.Destroy(zfs.DestroyDefault)
+err = f.Destroy(zfs.DestroyDefault)
+
+```
+
+# Contributing #
+
+See the [contributing guidelines](./CONTRIBUTING.md)
+
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/Vagrantfile b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/Vagrantfile
new file mode 100644
index 0000000..3bd6e12
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/Vagrantfile
@@ -0,0 +1,34 @@
+
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = "ubuntu/trusty64"
+ config.ssh.forward_agent = true
+
+ config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
+
+ config.vm.provision "shell", inline: <<EOF
+cat << END > /etc/profile.d/go.sh
+export GOPATH=\\$HOME/go
+export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH
+END
+
+chown -R vagrant /home/vagrant/go
+
+apt-get update
+apt-get install -y software-properties-common curl
+apt-add-repository --yes ppa:zfs-native/stable
+apt-get update
+apt-get install -y ubuntu-zfs
+
+cd /home/vagrant
+curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
+tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz
+
+cat << END > /etc/sudoers.d/go
+Defaults env_keep += "GOPATH"
+END
+
+EOF
+
+end
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/error.go b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/error.go
new file mode 100644
index 0000000..5408ccd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/error.go
@@ -0,0 +1,18 @@
+package zfs
+
+import (
+ "fmt"
+)
+
+// Error is an error which is returned when the `zfs` or `zpool` shell
+// commands return with a non-zero exit code.
+type Error struct {
+ Err error
+ Debug string
+ Stderr string
+}
+
+// Error returns the string representation of an Error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr)
+}
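
Callers can recover the captured stderr by type-asserting the returned error; a minimal sketch (the dataset name is made up):

```go
package main

import (
	"fmt"

	"github.com/mistifyio/go-zfs"
)

func main() {
	if _, err := zfs.GetDataset("tank/does-not-exist"); err != nil {
		// The command wrapper returns *zfs.Error, so assert on the pointer type.
		if zerr, ok := err.(*zfs.Error); ok {
			fmt.Println("zfs stderr:", zerr.Stderr)
		}
	}
}
```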
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/utils.go b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/utils.go
new file mode 100644
index 0000000..404ab2b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/utils.go
@@ -0,0 +1,323 @@
+package zfs
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/pborman/uuid"
+)
+
+type command struct {
+ Command string
+ Stdin io.Reader
+ Stdout io.Writer
+}
+
+func (c *command) Run(arg ...string) ([][]string, error) {
+
+ cmd := exec.Command(c.Command, arg...)
+
+ var stdout, stderr bytes.Buffer
+
+ if c.Stdout == nil {
+ cmd.Stdout = &stdout
+ } else {
+ cmd.Stdout = c.Stdout
+ }
+
+ if c.Stdin != nil {
+ cmd.Stdin = c.Stdin
+
+ }
+ cmd.Stderr = &stderr
+
+ id := uuid.New()
+ joinedArgs := strings.Join(cmd.Args, " ")
+
+ logger.Log([]string{"ID:" + id, "START", joinedArgs})
+ err := cmd.Run()
+ logger.Log([]string{"ID:" + id, "FINISH"})
+
+ if err != nil {
+ return nil, &Error{
+ Err: err,
+ Debug: strings.Join([]string{cmd.Path, joinedArgs}, " "),
+ Stderr: stderr.String(),
+ }
+ }
+
+ // assume that if you passed in something for stdout, you know what to do with it
+ if c.Stdout != nil {
+ return nil, nil
+ }
+
+ lines := strings.Split(stdout.String(), "\n")
+
+ //last line is always blank
+ lines = lines[0 : len(lines)-1]
+ output := make([][]string, len(lines))
+
+ for i, l := range lines {
+ output[i] = strings.Fields(l)
+ }
+
+ return output, nil
+}
+
+func setString(field *string, value string) {
+ v := ""
+ if value != "-" {
+ v = value
+ }
+ *field = v
+}
+
+func setUint(field *uint64, value string) error {
+ var v uint64
+ if value != "-" {
+ var err error
+ v, err = strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ *field = v
+ return nil
+}
+
+func (ds *Dataset) parseLine(line []string) error {
+ prop := line[1]
+ val := line[2]
+
+ var err error
+
+ switch prop {
+ case "available":
+ err = setUint(&ds.Avail, val)
+ case "compression":
+ setString(&ds.Compression, val)
+ case "mountpoint":
+ setString(&ds.Mountpoint, val)
+ case "quota":
+ err = setUint(&ds.Quota, val)
+ case "type":
+ setString(&ds.Type, val)
+ case "origin":
+ setString(&ds.Origin, val)
+ case "used":
+ err = setUint(&ds.Used, val)
+ case "volsize":
+ err = setUint(&ds.Volsize, val)
+ case "written":
+ err = setUint(&ds.Written, val)
+ case "logicalused":
+ err = setUint(&ds.Logicalused, val)
+ }
+ return err
+}
+
+/*
+ * from zfs diff's escape function:
+ *
+ * Prints a file name out a character at a time. If the character is
+ * not in the range of what we consider "printable" ASCII, display it
+ * as an escaped 3-digit octal value. ASCII values less than a space
+ * are all control characters and we declare the upper end as the
+ * DELete character. This also is the last 7-bit ASCII character.
+ * We choose to treat all 8-bit ASCII as not printable for this
+ * application.
+ */
+func unescapeFilepath(path string) (string, error) {
+ buf := make([]byte, 0, len(path))
+ llen := len(path)
+ for i := 0; i < llen; {
+ if path[i] == '\\' {
+ if llen < i+4 {
+ return "", fmt.Errorf("Invalid octal code: too short")
+ }
+ octalCode := path[(i + 1):(i + 4)]
+ val, err := strconv.ParseUint(octalCode, 8, 8)
+ if err != nil {
+ return "", fmt.Errorf("Invalid octal code: %v", err)
+ }
+ buf = append(buf, byte(val))
+ i += 4
+ } else {
+ buf = append(buf, path[i])
+ i++
+ }
+ }
+ return string(buf), nil
+}
+
+var changeTypeMap = map[string]ChangeType{
+ "-": Removed,
+ "+": Created,
+ "M": Modified,
+ "R": Renamed,
+}
+var inodeTypeMap = map[string]InodeType{
+ "B": BlockDevice,
+ "C": CharacterDevice,
+ "/": Directory,
+ ">": Door,
+ "|": NamedPipe,
+ "@": SymbolicLink,
+ "P": EventPort,
+ "=": Socket,
+ "F": File,
+}
+
+// matches (+1) or (-1)
+var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)")
+
+func parseReferenceCount(field string) (int, error) {
+ matches := referenceCountRegex.FindStringSubmatch(field)
+ if matches == nil {
+ return 0, fmt.Errorf("Regexp does not match")
+ }
+ return strconv.Atoi(matches[1])
+}
+
+func parseInodeChange(line []string) (*InodeChange, error) {
+ llen := len(line)
+ if llen < 1 {
+ return nil, fmt.Errorf("Empty line passed")
+ }
+
+ changeType := changeTypeMap[line[0]]
+ if changeType == 0 {
+ return nil, fmt.Errorf("Unknown change type '%s'", line[0])
+ }
+
+ switch changeType {
+ case Renamed:
+ if llen != 4 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen)
+ }
+ case Modified:
+ if llen != 4 && llen != 3 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen)
+ }
+ default:
+ if llen != 3 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen)
+ }
+ }
+
+ inodeType := inodeTypeMap[line[1]]
+ if inodeType == 0 {
+ return nil, fmt.Errorf("Unknown inode type '%s'", line[1])
+ }
+
+ path, err := unescapeFilepath(line[2])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ }
+
+ var newPath string
+ var referenceCount int
+ switch changeType {
+ case Renamed:
+ newPath, err = unescapeFilepath(line[3])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ }
+ case Modified:
+ if llen == 4 {
+ referenceCount, err = parseReferenceCount(line[3])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse reference count: %v", err)
+ }
+ }
+ default:
+ newPath = ""
+ }
+
+ return &InodeChange{
+ Change: changeType,
+ Type: inodeType,
+ Path: path,
+ NewPath: newPath,
+ ReferenceCountChange: referenceCount,
+ }, nil
+}
+
+// example input
+//M / /testpool/bar/
+//+ F /testpool/bar/hello.txt
+//M / /testpool/bar/hello.txt (+1)
+//M / /testpool/bar/hello-hardlink
+func parseInodeChanges(lines [][]string) ([]*InodeChange, error) {
+ changes := make([]*InodeChange, len(lines))
+
+ for i, line := range lines {
+ c, err := parseInodeChange(line)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line)
+ }
+ changes[i] = c
+ }
+ return changes, nil
+}
+
+func listByType(t, filter string) ([]*Dataset, error) {
+ args := []string{"get", "-rHp", "-t", t, "all"}
+ if filter != "" {
+ args = append(args, filter)
+ }
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var datasets []*Dataset
+
+ name := ""
+ var ds *Dataset
+ for _, line := range out {
+ if name != line[0] {
+ name = line[0]
+ ds = &Dataset{Name: name}
+ datasets = append(datasets, ds)
+ }
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return datasets, nil
+}
+
+func propsSlice(properties map[string]string) []string {
+ args := make([]string, 0, len(properties)*3)
+ for k, v := range properties {
+ args = append(args, "-o")
+ args = append(args, fmt.Sprintf("%s=%s", k, v))
+ }
+ return args
+}
+
+func (z *Zpool) parseLine(line []string) error {
+ prop := line[1]
+ val := line[2]
+
+ var err error
+
+ switch prop {
+ case "health":
+ setString(&z.Health, val)
+ case "allocated":
+ err = setUint(&z.Allocated, val)
+ case "size":
+ err = setUint(&z.Size, val)
+ case "free":
+ err = setUint(&z.Free, val)
+ }
+ return err
+}
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/zfs.go b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/zfs.go
new file mode 100644
index 0000000..3d0dd66
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/zfs.go
@@ -0,0 +1,390 @@
+// Package zfs provides wrappers around the ZFS command line tools.
+package zfs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// ZFS dataset types, which can indicate if a dataset is a filesystem,
+// snapshot, or volume.
+const (
+ DatasetFilesystem = "filesystem"
+ DatasetSnapshot = "snapshot"
+ DatasetVolume = "volume"
+)
+
+// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot,
+// or volume. The Type struct member can be used to determine a dataset's type.
+//
+// The field definitions can be found in the ZFS manual:
+// http://www.freebsd.org/cgi/man.cgi?zfs(8).
+type Dataset struct {
+ Name string
+ Origin string
+ Used uint64
+ Avail uint64
+ Mountpoint string
+ Compression string
+ Type string
+ Written uint64
+ Volsize uint64
+ Usedbydataset uint64
+ Logicalused uint64
+ Quota uint64
+}
+
+// InodeType is the type of inode as reported by Diff
+type InodeType int
+
+// Types of Inodes
+const (
+ _ = iota // 0 == unknown type
+ BlockDevice InodeType = iota
+ CharacterDevice
+ Directory
+ Door
+ NamedPipe
+ SymbolicLink
+ EventPort
+ Socket
+ File
+)
+
+// ChangeType is the type of inode change as reported by Diff
+type ChangeType int
+
+// Types of Changes
+const (
+ _ = iota // 0 == unknown type
+ Removed ChangeType = iota
+ Created
+ Modified
+ Renamed
+)
+
+// DestroyFlag is the options flag passed to Destroy
+type DestroyFlag int
+
+// Valid destroy options
+const (
+ DestroyDefault DestroyFlag = 1 << iota
+ DestroyRecursive = 1 << iota
+ DestroyRecursiveClones = 1 << iota
+ DestroyDeferDeletion = 1 << iota
+ DestroyForceUmount = 1 << iota
+)
+
+// InodeChange represents a change as reported by Diff
+type InodeChange struct {
+ Change ChangeType
+ Type InodeType
+ Path string
+ NewPath string
+ ReferenceCountChange int
+}
+
+// Logger can be used to log commands/actions
+type Logger interface {
+ Log(cmd []string)
+}
+
+type defaultLogger struct{}
+
+func (*defaultLogger) Log(cmd []string) {
+ return
+}
+
+var logger Logger = &defaultLogger{}
+
+// SetLogger sets a log handler used to log all commands, including their
+// arguments, before they are executed.
+func SetLogger(l Logger) {
+ if l != nil {
+ logger = l
+ }
+}
+
+// zfs is a helper function to wrap typical calls to zfs.
+func zfs(arg ...string) ([][]string, error) {
+ c := command{Command: "zfs"}
+ return c.Run(arg...)
+}
+
+// Datasets returns a slice of ZFS datasets, regardless of type.
+// A filter argument may be passed to select a dataset with the matching name,
+// or empty string ("") may be used to select all datasets.
+func Datasets(filter string) ([]*Dataset, error) {
+ return listByType("all", filter)
+}
+
+// Snapshots returns a slice of ZFS snapshots.
+// A filter argument may be passed to select a snapshot with the matching name,
+// or empty string ("") may be used to select all snapshots.
+func Snapshots(filter string) ([]*Dataset, error) {
+ return listByType(DatasetSnapshot, filter)
+}
+
+// Filesystems returns a slice of ZFS filesystems.
+// A filter argument may be passed to select a filesystem with the matching name,
+// or empty string ("") may be used to select all filesystems.
+func Filesystems(filter string) ([]*Dataset, error) {
+ return listByType(DatasetFilesystem, filter)
+}
+
+// Volumes returns a slice of ZFS volumes.
+// A filter argument may be passed to select a volume with the matching name,
+// or empty string ("") may be used to select all volumes.
+func Volumes(filter string) ([]*Dataset, error) {
+ return listByType(DatasetVolume, filter)
+}
+
+// GetDataset retrieves a single ZFS dataset by name. This dataset could be
+// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
+func GetDataset(name string) (*Dataset, error) {
+ out, err := zfs("get", "-Hp", "all", name)
+ if err != nil {
+ return nil, err
+ }
+
+ ds := &Dataset{Name: name}
+ for _, line := range out {
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return ds, nil
+}
+
+// Clone clones a ZFS snapshot and returns a clone dataset.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) {
+ if d.Type != DatasetSnapshot {
+ return nil, errors.New("can only clone snapshots")
+ }
+ args := make([]string, 2, 4)
+ args[0] = "clone"
+ args[1] = "-p"
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+ args = append(args, []string{d.Name, dest}...)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(dest)
+}
+
+// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a
+// new snapshot with the specified name, and streams the input data into the
+// newly-created snapshot.
+func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) {
+ c := command{Command: "zfs", Stdin: input}
+ _, err := c.Run("receive", name)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) SendSnapshot(output io.Writer) error {
+ if d.Type != DatasetSnapshot {
+ return errors.New("can only send snapshots")
+ }
+
+ c := command{Command: "zfs", Stdout: output}
+ _, err := c.Run("send", d.Name)
+ return err
+}
+
+// CreateVolume creates a new ZFS volume with the specified name, size, and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) {
+ args := make([]string, 4, 5)
+ args[0] = "create"
+ args[1] = "-p"
+ args[2] = "-V"
+ args[3] = strconv.FormatUint(size, 10)
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+ args = append(args, name)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any
+// descendants of the dataset will be recursively destroyed, including snapshots.
+// If the deferred bit flag is set, the snapshot is marked for deferred
+// deletion.
+func (d *Dataset) Destroy(flags DestroyFlag) error {
+ args := make([]string, 1, 3)
+ args[0] = "destroy"
+ if flags&DestroyRecursive != 0 {
+ args = append(args, "-r")
+ }
+
+ if flags&DestroyRecursiveClones != 0 {
+ args = append(args, "-R")
+ }
+
+ if flags&DestroyDeferDeletion != 0 {
+ args = append(args, "-d")
+ }
+
+ if flags&DestroyForceUmount != 0 {
+ args = append(args, "-f")
+ }
+
+ args = append(args, d.Name)
+ _, err := zfs(args...)
+ return err
+}
+
+// SetProperty sets a ZFS property on the receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) SetProperty(key, val string) error {
+ prop := strings.Join([]string{key, val}, "=")
+ _, err := zfs("set", prop, d.Name)
+ return err
+}
+
+// GetProperty returns the current value of a ZFS property from the
+// receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) GetProperty(key string) (string, error) {
+ out, err := zfs("get", key, d.Name)
+ if err != nil {
+ return "", err
+ }
+
+ return out[0][2], nil
+}
+
+// Snapshots returns a slice of all ZFS snapshots of a given dataset.
+func (d *Dataset) Snapshots() ([]*Dataset, error) {
+ return Snapshots(d.Name)
+}
+
+// CreateFilesystem creates a new ZFS filesystem with the specified name and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
+ args := make([]string, 1, 4)
+ args[0] = "create"
+
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+
+ args = append(args, name)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
+// specified name. Optionally, the snapshot can be taken recursively, creating
+// snapshots of all descendant filesystems in a single, atomic operation.
+func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
+ args := make([]string, 1, 4)
+ args[0] = "snapshot"
+ if recursive {
+ args = append(args, "-r")
+ }
+ snapName := fmt.Sprintf("%s@%s", d.Name, name)
+ args = append(args, snapName)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(snapName)
+}
+
+// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
+// Optionally, intermediate snapshots can be destroyed. If more recent
+// snapshots exist, a rollback cannot be completed without this option.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Rollback(destroyMoreRecent bool) error {
+ if d.Type != DatasetSnapshot {
+ return errors.New("can only rollback snapshots")
+ }
+
+ args := make([]string, 1, 3)
+ args[0] = "rollback"
+ if destroyMoreRecent {
+ args = append(args, "-r")
+ }
+ args = append(args, d.Name)
+
+ _, err := zfs(args...)
+ return err
+}
+
+// Children returns a slice of children of the receiving ZFS dataset.
+// A recursion depth may be specified; a depth of 0 allows unlimited
+// recursion.
+func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
+ args := []string{"get", "-t", "all", "-Hp", "all"}
+ if depth > 0 {
+ args = append(args, "-d")
+ args = append(args, strconv.FormatUint(depth, 10))
+ } else {
+ args = append(args, "-r")
+ }
+ args = append(args, d.Name)
+
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var datasets []*Dataset
+ name := ""
+ var ds *Dataset
+ for _, line := range out {
+ if name != line[0] {
+ name = line[0]
+ ds = &Dataset{Name: name}
+ datasets = append(datasets, ds)
+ }
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+ return datasets[1:], nil
+}
+
+// Diff returns changes between a snapshot and the given ZFS dataset.
+// The snapshot name must include the filesystem part as it is possible to
+// compare clones with their origin snapshots.
+func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) {
+ args := []string{"diff", "-FH", snapshot, d.Name}[:]
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ inodeChanges, err := parseInodeChanges(out)
+ if err != nil {
+ return nil, err
+ }
+ return inodeChanges, nil
+}
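
A sketch tying the pieces together: install a command logger, then list filesystems (assumes root privileges and a working zpool):

```go
package main

import (
	"log"

	"github.com/mistifyio/go-zfs"
)

type cmdLogger struct{}

// Log prints every zfs/zpool invocation before it runs.
func (cmdLogger) Log(cmd []string) { log.Println(cmd) }

func main() {
	zfs.SetLogger(cmdLogger{})

	fss, err := zfs.Filesystems("") // "" selects all filesystems
	if err != nil {
		log.Fatal(err)
	}
	for _, fs := range fss {
		log.Println(fs.Name, fs.Mountpoint)
	}
}
```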
diff --git a/Godeps/_workspace/src/github.com/mistifyio/go-zfs/zpool.go b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/zpool.go
new file mode 100644
index 0000000..6ba52d3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/mistifyio/go-zfs/zpool.go
@@ -0,0 +1,105 @@
+package zfs
+
+// ZFS zpool states, which can indicate if a pool is online, offline,
+// degraded, etc. More information regarding zpool states can be found here:
+// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html.
+const (
+ ZpoolOnline = "ONLINE"
+ ZpoolDegraded = "DEGRADED"
+ ZpoolFaulted = "FAULTED"
+ ZpoolOffline = "OFFLINE"
+ ZpoolUnavail = "UNAVAIL"
+ ZpoolRemoved = "REMOVED"
+)
+
+// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can
+// contain many descendent datasets.
+type Zpool struct {
+ Name string
+ Health string
+ Allocated uint64
+ Size uint64
+ Free uint64
+}
+
+// zpool is a helper function to wrap typical calls to zpool.
+func zpool(arg ...string) ([][]string, error) {
+ c := command{Command: "zpool"}
+ return c.Run(arg...)
+}
+
+// GetZpool retrieves a single ZFS zpool by name.
+func GetZpool(name string) (*Zpool, error) {
+ out, err := zpool("get", "all", "-p", name)
+ if err != nil {
+ return nil, err
+ }
+
+ // there is no -H option for `zpool get`, so skip the header row
+ out = out[1:]
+
+ z := &Zpool{Name: name}
+ for _, line := range out {
+ if err := z.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return z, nil
+}
+
+// Datasets returns a slice of all ZFS datasets in a zpool.
+func (z *Zpool) Datasets() ([]*Dataset, error) {
+ return Datasets(z.Name)
+}
+
+// Snapshots returns a slice of all ZFS snapshots in a zpool.
+func (z *Zpool) Snapshots() ([]*Dataset, error) {
+ return Snapshots(z.Name)
+}
+
+// CreateZpool creates a new ZFS zpool with the specified name, properties,
+// and optional arguments.
+// A full list of available ZFS properties and command-line arguments may be
+// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
+ cli := make([]string, 1, 4)
+ cli[0] = "create"
+ if properties != nil {
+ cli = append(cli, propsSlice(properties)...)
+ }
+ cli = append(cli, name)
+ cli = append(cli, args...)
+ _, err := zpool(cli...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Zpool{Name: name}, nil
+}
+
+// Destroy destroys a ZFS zpool by name.
+func (z *Zpool) Destroy() error {
+ _, err := zpool("destroy", z.Name)
+ return err
+}
+
+// ListZpools lists all ZFS zpools accessible on the current system.
+func ListZpools() ([]*Zpool, error) {
+ args := []string{"list", "-Ho", "name"}
+ out, err := zpool(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var pools []*Zpool
+
+ for _, line := range out {
+ z, err := GetZpool(line[0])
+ if err != nil {
+ return nil, err
+ }
+ pools = append(pools, z)
+ }
+ return pools, nil
+}
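+
+// Illustrative usage sketch (not part of the upstream library; the pool name
+// and device paths are hypothetical):
+//
+//	pool, err := zfs.CreateZpool("tank", nil, "mirror", "/dev/sda", "/dev/sdb")
+//	if err != nil { /* handle error */ }
+//	datasets, err := pool.Datasets() // every dataset within the pool
+//	if err != nil { /* handle error */ }
+//	pools, err := zfs.ListZpools() // every pool visible on this system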
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE b/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE
new file mode 100644
index 0000000..2744858
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE b/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE
new file mode 100644
index 0000000..5c97abc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/NOTICE
@@ -0,0 +1,17 @@
+runc
+
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (http://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see http://www.bis.doc.gov
+
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/README.md b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/README.md
new file mode 100644
index 0000000..fc6b4b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/README.md
@@ -0,0 +1,238 @@
+Libcontainer provides a native Go implementation for creating containers
+with namespaces, cgroups, capabilities, and filesystem access controls.
+It allows you to manage the lifecycle of the container, performing additional operations
+after the container is created.
+
+
+#### Container
+A container is a self-contained execution environment that shares the kernel of the
+host system and is (optionally) isolated from other containers in the system.
+
+#### Using libcontainer
+
+Because containers are spawned in a two-step process, you will need a binary that
+will be executed as the init process for the container. In libcontainer, we use
+the current binary (/proc/self/exe) as the init process, invoked with the
+arg "init". We call the first-step process "bootstrap", so you always need an
+"init" function as the entry point of "bootstrap".
+
+```go
+func init() {
+ if len(os.Args) > 1 && os.Args[1] == "init" {
+ runtime.GOMAXPROCS(1)
+ runtime.LockOSThread()
+ factory, _ := libcontainer.New("")
+ if err := factory.StartInitialization(); err != nil {
+ logrus.Fatal(err)
+ }
+ panic("--this line should have never been executed, congratulations--")
+ }
+}
+```
+
+Then, to create a container, you first have to initialize an instance of a factory
+that will handle the creation and initialization of the container.
+
+```go
+factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
+if err != nil {
+ logrus.Fatal(err)
+ return
+}
+```
+
+Once you have created an instance of the factory, you can build a configuration
+struct describing how the container is to be created. A sample would look similar to this:
+
+```go
+defaultMountFlags := syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
+config := &configs.Config{
+ Rootfs: "/your/path/to/rootfs",
+ Capabilities: []string{
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FSETID",
+ "CAP_FOWNER",
+ "CAP_MKNOD",
+ "CAP_NET_RAW",
+ "CAP_SETGID",
+ "CAP_SETUID",
+ "CAP_SETFCAP",
+ "CAP_SETPCAP",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SYS_CHROOT",
+ "CAP_KILL",
+ "CAP_AUDIT_WRITE",
+ },
+ Namespaces: configs.Namespaces([]configs.Namespace{
+ {Type: configs.NEWNS},
+ {Type: configs.NEWUTS},
+ {Type: configs.NEWIPC},
+ {Type: configs.NEWPID},
+ {Type: configs.NEWUSER},
+ {Type: configs.NEWNET},
+ }),
+ Cgroups: &configs.Cgroup{
+ Name: "test-container",
+ Parent: "system",
+ Resources: &configs.Resources{
+ MemorySwappiness: -1,
+ AllowAllDevices: false,
+ AllowedDevices: configs.DefaultAllowedDevices,
+ },
+ },
+ MaskPaths: []string{
+ "/proc/kcore",
+ },
+ ReadonlyPaths: []string{
+ "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
+ },
+ Devices: configs.DefaultAutoCreatedDevices,
+ Hostname: "testing",
+ Mounts: []*configs.Mount{
+ {
+ Source: "proc",
+ Destination: "/proc",
+ Device: "proc",
+ Flags: defaultMountFlags,
+ },
+ {
+ Source: "tmpfs",
+ Destination: "/dev",
+ Device: "tmpfs",
+ Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME,
+ Data: "mode=755",
+ },
+ {
+ Source: "devpts",
+ Destination: "/dev/pts",
+ Device: "devpts",
+ Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC,
+ Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
+ },
+ {
+ Device: "tmpfs",
+ Source: "shm",
+ Destination: "/dev/shm",
+ Data: "mode=1777,size=65536k",
+ Flags: defaultMountFlags,
+ },
+ {
+ Source: "mqueue",
+ Destination: "/dev/mqueue",
+ Device: "mqueue",
+ Flags: defaultMountFlags,
+ },
+ {
+ Source: "sysfs",
+ Destination: "/sys",
+ Device: "sysfs",
+ Flags: defaultMountFlags | syscall.MS_RDONLY,
+ },
+ },
+ UidMappings: []configs.IDMap{
+ {
+ ContainerID: 0,
+ HostID: 1000,
+ Size: 65536,
+ },
+ },
+ GidMappings: []configs.IDMap{
+ {
+ ContainerID: 0,
+ HostID: 1000,
+ Size: 65536,
+ },
+ },
+ Networks: []*configs.Network{
+ {
+ Type: "loopback",
+ Address: "127.0.0.1/0",
+ Gateway: "localhost",
+ },
+ },
+ Rlimits: []configs.Rlimit{
+ {
+ Type: syscall.RLIMIT_NOFILE,
+ Hard: uint64(1025),
+ Soft: uint64(1025),
+ },
+ },
+}
+```
+
+Once you have the configuration populated you can create a container:
+
+```go
+container, err := factory.Create("container-id", config)
+if err != nil {
+ logrus.Fatal(err)
+ return
+}
+```
+
+To spawn bash as the initial process inside the container and have the
+process's pid returned in order to wait on, signal, or kill the process:
+
+```go
+process := &libcontainer.Process{
+ Args: []string{"/bin/bash"},
+ Env: []string{"PATH=/bin"},
+ User: "daemon",
+ Stdin: os.Stdin,
+ Stdout: os.Stdout,
+ Stderr: os.Stderr,
+}
+
+err := container.Start(process)
+if err != nil {
+ logrus.Fatal(err)
+ container.Destroy()
+ return
+}
+
+// wait for the process to finish.
+_, err = process.Wait()
+if err != nil {
+ logrus.Fatal(err)
+}
+
+// destroy the container.
+container.Destroy()
+```
+
+Additional ways to interact with a running container are:
+
+```go
+// return all the pids for all processes running inside the container.
+processes, err := container.Processes()
+
+// get detailed cpu, memory, io, and network statistics for the container and
+// its processes.
+stats, err := container.Stats()
+
+// pause all processes inside the container.
+container.Pause()
+
+// resume all paused processes.
+container.Resume()
+```
+
+
+#### Checkpoint & Restore
+
+libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
+This lets you save the state of a process running inside a container to disk, and then restore
+that state into a new process, on the same machine or on another machine.
+
+`criu` version 1.5.2 or higher is required to use checkpoint and restore.
+If you don't already have `criu` installed, you can build it from source, following the
+[online instructions](http://criu.org/Installation). `criu` is also installed in the docker image
+generated when building libcontainer with docker.
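+
+As a rough sketch of how this might look with the API above (the
+`CriuOpts` fields shown here are assumptions; check the package
+documentation for the exact structure):
+
+```go
+imagesDir := "/var/lib/container/checkpoint"
+
+// Save the container's process state to disk.
+if err := container.Checkpoint(&libcontainer.CriuOpts{
+	ImagesDirectory: imagesDir,
+}); err != nil {
+	logrus.Fatal(err)
+}
+
+// Later, restore that state into a new process.
+if err := container.Restore(process, &libcontainer.CriuOpts{
+	ImagesDirectory: imagesDir,
+}); err != nil {
+	logrus.Fatal(err)
+}
+```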
+
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/SPEC.md b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/SPEC.md
new file mode 100644
index 0000000..6151112
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/SPEC.md
@@ -0,0 +1,334 @@
+## Container Specification - v1
+
+This is the standard configuration for version 1 containers. It includes
+namespaces, standard filesystem setup, a default Linux capability set, and
+information about resource reservations. It also has information about any
+populated environment settings for the processes running inside a container.
+
+Along with the configuration of how a container is created the standard also
+discusses actions that can be performed on a container to manage and inspect
+information about the processes running inside.
+
+The v1 profile is meant to be able to accommodate the majority of applications
+with a strong security configuration.
+
+### System Requirements and Compatibility
+
+Minimum requirements:
+* Kernel version - 3.10 recommended, 2.6.2x minimum (with backported patches)
+* Mounted cgroups with each subsystem in its own hierarchy
+
+
+### Namespaces
+
+| Flag | Enabled |
+| ------------ | ------- |
+| CLONE_NEWPID | 1 |
+| CLONE_NEWUTS | 1 |
+| CLONE_NEWIPC | 1 |
+| CLONE_NEWNET | 1 |
+| CLONE_NEWNS | 1 |
+| CLONE_NEWUSER | 1 |
+
+Namespaces are created for the container via the `clone` syscall.
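+
+For illustration only, the same set of namespaces can be requested from Go by
+setting clone flags on a child process; this is a minimal sketch, not how
+libcontainer itself is structured (user namespaces are omitted here because
+they also require uid/gid mappings):
+
+```go
+package main
+
+import (
+	"os"
+	"os/exec"
+	"syscall"
+)
+
+func main() {
+	// Spawn a shell in fresh pid, uts, ipc, net, and mount namespaces.
+	// Requires root privileges.
+	cmd := exec.Command("/bin/sh")
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Cloneflags: syscall.CLONE_NEWPID | syscall.CLONE_NEWUTS |
+			syscall.CLONE_NEWIPC | syscall.CLONE_NEWNET | syscall.CLONE_NEWNS,
+	}
+	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+	if err := cmd.Run(); err != nil {
+		panic(err)
+	}
+}
+```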
+
+
+### Filesystem
+
+A root filesystem must be provided to a container for execution. The container
+will use this root filesystem (rootfs) to jail and spawn processes inside, with
+the binaries and system libraries local to that directory. Any binaries
+to be executed must be contained within this rootfs.
+
+Mounts that happen inside the container are automatically cleaned up when the
+container exits: the mount namespace is destroyed, and the kernel
+unmounts all the mounts that were set up within that namespace.
+
+For a container to execute properly, certain filesystems are required
+to be mounted within the rootfs, which the runtime will set up.
+
+| Path | Type | Flags | Data |
+| ----------- | ------ | -------------------------------------- | ---------------------------------------- |
+| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
+| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 |
+| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k |
+| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
+| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 |
+| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | |
+
+
+After a container's filesystems are mounted within the newly created
+mount namespace, `/dev` will need to be populated with a set of device nodes.
+A rootfs is not expected to specify any device nodes for `/dev`, as the
+container runtime will set up the correct devices required for executing
+the container's process.
+
+| Path | Mode | Access |
+| ------------ | ---- | ---------- |
+| /dev/null | 0666 | rwm |
+| /dev/zero | 0666 | rwm |
+| /dev/full | 0666 | rwm |
+| /dev/tty | 0666 | rwm |
+| /dev/random | 0666 | rwm |
+| /dev/urandom | 0666 | rwm |
+| /dev/fuse | 0666 | rwm |
+
+
+**ptmx**
+`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within
+the container.
+
+The use of a pseudo TTY is optional within a container, and a container should support both cases.
+If a pseudo TTY is provided to the container, `/dev/console` will need to be
+set up by bind-mounting the console in `/dev/` after it has been populated and mounted
+in tmpfs.
+
+| Source | Destination | UID GID | Mode | Type |
+| --------------- | ------------ | ------- | ---- | ---- |
+| *pty host path* | /dev/console | 0 0 | 0600 | bind |
+
+
+After `/dev/null` has been set up, we check for any external links between
+the container's io (STDIN, STDOUT, STDERR). If the container's io is pointing
+to `/dev/null` outside the container, we close it and `dup2` the `/dev/null`
+that is local to the container's rootfs.
+
+
+After the container has `/proc` mounted, a few standard symlinks are set up
+within `/dev/` for the io.
+
+| Source | Destination |
+| --------------- | ----------- |
+| /proc/self/fd | /dev/fd |
+| /proc/self/fd/0 | /dev/stdin |
+| /proc/self/fd/1 | /dev/stdout |
+| /proc/self/fd/2 | /dev/stderr |
+
+A `pivot_root` is used to change the root for the process, effectively
+jailing the process inside the rootfs.
+
+```c
+put_old = mkdir(...);
+pivot_root(rootfs, put_old);
+chdir("/");
+unmount(put_old, MS_DETACH);
+rmdir(put_old);
+```
+
+For containers running with a rootfs inside `ramfs`, a `MS_MOVE` combined
+with a `chroot` is required, as `pivot_root` is not supported in `ramfs`.
+
+```c
+mount(rootfs, "/", NULL, MS_MOVE, NULL);
+chroot(".");
+chdir("/");
+```
+
+The `umask` is set back to `0022` after the filesystem setup has been completed.
+
+### Resources
+
+Cgroups are used to handle resource allocation for containers. This includes
+system resources like cpu, memory, and device access.
+
+| Subsystem | Enabled |
+| ---------- | ------- |
+| devices | 1 |
+| memory | 1 |
+| cpu | 1 |
+| cpuacct | 1 |
+| cpuset | 1 |
+| blkio | 1 |
+| perf_event | 1 |
+| freezer | 1 |
+| hugetlb | 1 |
+
+
+All cgroup subsystems are joined so that statistics can be collected from
+each of the subsystems. The freezer does not expose any stats but is joined
+so that containers can be paused and resumed.
+
+The parent process of the container's init must place the init pid inside
+the correct cgroups before initialization begins. This is done so
+that no processes or threads escape the cgroups. This synchronization is
+done via a pipe (specified in the runtime section below) on which the container's
+init process blocks, waiting for the parent to finish setup.
+
+### Security
+
+The standard set of Linux capabilities that are set in a container
+provides a good default for security and flexibility for applications.
+
+
+| Capability | Enabled |
+| -------------------- | ------- |
+| CAP_NET_RAW | 1 |
+| CAP_NET_BIND_SERVICE | 1 |
+| CAP_AUDIT_READ | 1 |
+| CAP_AUDIT_WRITE | 1 |
+| CAP_DAC_OVERRIDE | 1 |
+| CAP_SETFCAP | 1 |
+| CAP_SETPCAP | 1 |
+| CAP_SETGID | 1 |
+| CAP_SETUID | 1 |
+| CAP_MKNOD | 1 |
+| CAP_CHOWN | 1 |
+| CAP_FOWNER | 1 |
+| CAP_FSETID | 1 |
+| CAP_KILL | 1 |
+| CAP_SYS_CHROOT | 1 |
+| CAP_NET_BROADCAST | 0 |
+| CAP_SYS_MODULE | 0 |
+| CAP_SYS_RAWIO | 0 |
+| CAP_SYS_PACCT | 0 |
+| CAP_SYS_ADMIN | 0 |
+| CAP_SYS_NICE | 0 |
+| CAP_SYS_RESOURCE | 0 |
+| CAP_SYS_TIME | 0 |
+| CAP_SYS_TTY_CONFIG | 0 |
+| CAP_AUDIT_CONTROL | 0 |
+| CAP_MAC_OVERRIDE | 0 |
+| CAP_MAC_ADMIN | 0 |
+| CAP_NET_ADMIN | 0 |
+| CAP_SYSLOG | 0 |
+| CAP_DAC_READ_SEARCH | 0 |
+| CAP_LINUX_IMMUTABLE | 0 |
+| CAP_IPC_LOCK | 0 |
+| CAP_IPC_OWNER | 0 |
+| CAP_SYS_PTRACE | 0 |
+| CAP_SYS_BOOT | 0 |
+| CAP_LEASE | 0 |
+| CAP_WAKE_ALARM | 0 |
+| CAP_BLOCK_SUSPEND | 0 |
+
+
+Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)
+and [selinux](http://selinuxproject.org/page/Main_Page) can be used with
+the containers. A container should support setting an apparmor profile or
+selinux process and mount labels if provided in the configuration.
+
+Standard apparmor profile:
+```c
+#include <tunables/global>
+profile flags=(attach_disconnected,mediate_deleted) {
+ #include <abstractions/base>
+ network,
+ capability,
+ file,
+ umount,
+
+ deny @{PROC}/sys/fs/** wklx,
+ deny @{PROC}/sysrq-trigger rwklx,
+ deny @{PROC}/mem rwklx,
+ deny @{PROC}/kmem rwklx,
+ deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
+ deny @{PROC}/sys/kernel/*/** wklx,
+
+ deny mount,
+
+ deny /sys/[^f]*/** wklx,
+ deny /sys/f[^s]*/** wklx,
+ deny /sys/fs/[^c]*/** wklx,
+ deny /sys/fs/c[^g]*/** wklx,
+ deny /sys/fs/cg[^r]*/** wklx,
+ deny /sys/firmware/efi/efivars/** rwklx,
+ deny /sys/kernel/security/** rwklx,
+}
+```
+
+*TODO: seccomp work is being done to find a good default config*
+
+### Runtime and Init Process
+
+During container creation, the parent process needs to talk to the container's init
+process and have a form of synchronization. This is accomplished by creating
+a pipe that is passed to the container's init. When the init process first spawns,
+it will block on its side of the pipe until the parent closes its side. This
+gives the parent time to place the new process inside a cgroup hierarchy
+and/or write any uid/gid mappings required for user namespaces.
+The pipe is passed to the init process via FD 3.
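+
+A minimal sketch of this handshake, outside of libcontainer (the command
+arguments and setup steps are placeholders):
+
+```go
+r, w, err := os.Pipe()
+if err != nil {
+	panic(err)
+}
+cmd := exec.Command("/proc/self/exe", "init")
+cmd.ExtraFiles = []*os.File{r} // the read end becomes FD 3 in the child
+if err := cmd.Start(); err != nil {
+	panic(err)
+}
+r.Close()
+// ... place cmd.Process.Pid into the desired cgroups and write any
+// uid/gid mappings here ...
+w.Close() // the child's blocking read on FD 3 returns; init proceeds
+```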
+
+The application consuming libcontainer should be compiled statically. libcontainer
+does not define any init process and the arguments provided are used to `exec` the
+process inside the application. There should be no long running init within the
+container spec.
+
+If a pseudo tty is provided to a container, it will open and `dup2` the console
+as the container's STDIN, STDOUT, STDERR as well as mounting the console
+as `/dev/console`.
+
+An extra set of mounts is provided to a container and set up for use. A container's
+rootfs can contain some non-portable files that can cause side effects during
+execution of a process. These files are usually created and populated with
+container-specific information via the runtime.
+
+**Extra runtime files:**
+* /etc/hosts
+* /etc/resolv.conf
+* /etc/hostname
+* /etc/localtime
+
+
+#### Defaults
+
+There are a few defaults that can be overridden by users; when omitted,
+the following apply to processes within a container.
+
+| Type | Value |
+| ------------------- | ------------------------------ |
+| Parent Death Signal | SIGKILL |
+| UID | 0 |
+| GID | 0 |
+| GROUPS | 0, NULL |
+| CWD | "/" |
+| $HOME | Current user's home dir or "/" |
+| Readonly rootfs | false |
+| Pseudo TTY | false |
+
+
+## Actions
+
+After a container is created there is a standard set of actions that can
+be done to the container. These actions are part of the public API for
+a container.
+
+| Action | Description |
+| -------------- | ------------------------------------------------------------------ |
+| Get processes | Return all the pids for processes running inside a container |
+| Get Stats | Return resource statistics for the container as a whole |
+| Wait | Wait waits on the container's init process ( pid 1 ) |
+| Wait Process | Wait on any of the container's processes returning the exit status |
+| Destroy | Kill the container's init process and remove any filesystem state |
+| Signal | Send a signal to the container's init process |
+| Signal Process | Send a signal to any of the container's processes |
+| Pause | Pause all processes inside the container |
+| Resume | Resume all processes inside the container if paused |
+| Exec | Execute a new process inside of the container ( requires setns ) |
+| Set | Setup configs of the container after it's created |
+
+### Execute a new process inside of a running container
+
+Users can execute a new process inside of a running container. Any binaries to be
+executed must be accessible within the container's rootfs.
+
+The started process will run inside the container's rootfs. Any changes
+made by the process to the container's filesystem will persist after the
+process finishes executing.
+
+The started process will join all the container's existing namespaces. When the
+container is paused, the process will also be paused and will resume when
+the container is unpaused. The started process will only run when the container's
+primary process (PID 1) is running, and will not be restarted when the container
+is restarted.
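+
+With the libcontainer API described in the README, joining a running container
+might look like the sketch below (`container` is assumed to be a handle to a
+running container; the exact entry point for joining can differ by version):
+
+```go
+ps := &libcontainer.Process{
+	Args:   []string{"/bin/ps"},
+	Env:    []string{"PATH=/bin:/usr/bin"},
+	Stdout: os.Stdout,
+	Stderr: os.Stderr,
+}
+// Starting a process against an already-running container joins its
+// namespaces via setns.
+if err := container.Start(ps); err != nil {
+	logrus.Fatal(err)
+}
+if _, err := ps.Wait(); err != nil {
+	logrus.Fatal(err)
+}
+```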
+
+#### Planned additions
+
+The started process will have its own cgroups nested inside the container's
+cgroups. This is used for process tracking and optionally resource allocation
+handling for the new process. The freezer cgroup is required; the rest of the cgroups
+are optional. The process executor must place its pid inside the correct
+cgroups before starting the process. This is done so that no child processes or
+threads can escape the cgroups.
+
+When the process is stopped, the process executor will try (in a best-effort way)
+to stop all its children and remove the sub-cgroups.
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
new file mode 100644
index 0000000..22c17f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
@@ -0,0 +1,38 @@
+// +build apparmor,linux
+
+package apparmor
+
+// #cgo LDFLAGS: -lapparmor
+// #include <sys/apparmor.h>
+// #include <stdlib.h>
+import "C"
+import (
+ "io/ioutil"
+ "os"
+ "unsafe"
+)
+
+// IsEnabled returns true if apparmor is enabled for the host.
+func IsEnabled() bool {
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
+ if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
+ buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+ return err == nil && len(buf) > 1 && buf[0] == 'Y'
+ }
+ }
+ return false
+}
+
+// ApplyProfile will apply the profile with the specified name to the process after
+// the next exec.
+func ApplyProfile(name string) error {
+ if name == "" {
+ return nil
+ }
+ cName := C.CString(name)
+ defer C.free(unsafe.Pointer(cName))
+ if _, err := C.aa_change_onexec(cName); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go
new file mode 100644
index 0000000..d4110cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go
@@ -0,0 +1,20 @@
+// +build !apparmor !linux
+
+package apparmor
+
+import (
+ "errors"
+)
+
+var ErrApparmorNotEnabled = errors.New("apparmor: config provided but apparmor not supported")
+
+func IsEnabled() bool {
+ return false
+}
+
+func ApplyProfile(name string) error {
+ if name != "" {
+ return ErrApparmorNotEnabled
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/capabilities_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/capabilities_linux.go
new file mode 100644
index 0000000..4eda56d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/capabilities_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/syndtr/gocapability/capability"
+)
+
+const allCapabilityTypes = capability.CAPS | capability.BOUNDS
+
+var capabilityMap map[string]capability.Cap
+
+func init() {
+ capabilityMap = make(map[string]capability.Cap)
+ last := capability.CAP_LAST_CAP
+ // workaround for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+ if last == capability.Cap(63) {
+ last = capability.CAP_BLOCK_SUSPEND
+ }
+ for _, cap := range capability.List() {
+ if cap > last {
+ continue
+ }
+ capKey := fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))
+ capabilityMap[capKey] = cap
+ }
+}
+
+func newCapWhitelist(caps []string) (*whitelist, error) {
+ l := []capability.Cap{}
+ for _, c := range caps {
+ v, ok := capabilityMap[c]
+ if !ok {
+ return nil, fmt.Errorf("unknown capability %q", c)
+ }
+ l = append(l, v)
+ }
+ pid, err := capability.NewPid(os.Getpid())
+ if err != nil {
+ return nil, err
+ }
+ return &whitelist{
+ keep: l,
+ pid: pid,
+ }, nil
+}
+
+type whitelist struct {
+ pid capability.Capabilities
+ keep []capability.Cap
+}
+
+// dropBoundingSet drops the capability bounding set to those specified in the whitelist.
+func (w *whitelist) dropBoundingSet() error {
+ w.pid.Clear(capability.BOUNDS)
+ w.pid.Set(capability.BOUNDS, w.keep...)
+ return w.pid.Apply(capability.BOUNDS)
+}
+
+// drop drops all capabilities for the current process except those specified in the whitelist.
+func (w *whitelist) drop() error {
+ w.pid.Clear(allCapabilityTypes)
+ w.pid.Set(allCapabilityTypes, w.keep...)
+ return w.pid.Apply(allCapabilityTypes)
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
new file mode 100644
index 0000000..c8f7796
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -0,0 +1,64 @@
+// +build linux
+
+package cgroups
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager interface {
+ // Apply cgroup configuration to the process with the specified pid
+ Apply(pid int) error
+
+ // Returns the PIDs inside the cgroup set
+ GetPids() ([]int, error)
+
+ // Returns the PIDs inside the cgroup set & all sub-cgroups
+ GetAllPids() ([]int, error)
+
+ // Returns statistics for the cgroup set
+ GetStats() (*Stats, error)
+
+ // Toggles the freezer cgroup according to the specified state
+ Freeze(state configs.FreezerState) error
+
+ // Destroys the cgroup set
+ Destroy() error
+
+ // NewCgroupManager() and LoadCgroupManager() require the following attributes:
+ // Paths map[string]string
+ // Cgroups *cgroups.Cgroup
+ // Paths maps each cgroup subsystem to the path at which it is mounted.
+ // Cgroups specifies cgroup settings for the various subsystems.
+
+ // Returns cgroup paths to save in a state file and to be able to
+ // restore the object later.
+ GetPaths() map[string]string
+
+ // Set the cgroup as configured.
+ Set(container *configs.Config) error
+}
+
+type NotFoundError struct {
+ Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+ return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+ return &NotFoundError{
+ Subsystem: sub,
+ }
+}
+
+func IsNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*NotFoundError)
+ return ok
+}
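+
+// Illustrative usage sketch (hypothetical caller code, not part of this
+// package): callers typically tolerate subsystems that are absent on the
+// host by checking IsNotFound instead of failing outright.
+//
+//	if err := manager.Apply(pid); err != nil {
+//		if cgroups.IsNotFound(err) {
+//			// subsystem not mounted on this host; continue without it
+//		} else {
+//			return err
+//		}
+//	}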
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
new file mode 100644
index 0000000..278d507
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package cgroups
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
new file mode 100644
index 0000000..4da3b73
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
@@ -0,0 +1,414 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "sync"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var (
+ subsystems = subsystemSet{
+ &CpusetGroup{},
+ &DevicesGroup{},
+ &MemoryGroup{},
+ &CpuGroup{},
+ &CpuacctGroup{},
+ &PidsGroup{},
+ &BlkioGroup{},
+ &HugetlbGroup{},
+ &NetClsGroup{},
+ &NetPrioGroup{},
+ &PerfEventGroup{},
+ &FreezerGroup{},
+ }
+ CgroupProcesses = "cgroup.procs"
+ HugePageSizes, _ = cgroups.GetHugePageSize()
+)
+
+var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
+
+type subsystemSet []subsystem
+
+func (s subsystemSet) Get(name string) (subsystem, error) {
+ for _, ss := range s {
+ if ss.Name() == name {
+ return ss, nil
+ }
+ }
+ return nil, errSubsystemDoesNotExist
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+ // Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+ GetStats(path string, stats *cgroups.Stats) error
+ // Removes the cgroup represented by 'cgroupData'.
+ Remove(*cgroupData) error
+ // Creates and joins the cgroup represented by 'cgroupData'.
+ Apply(*cgroupData) error
+ // Set the cgroup represented by cgroup.
+ Set(path string, cgroup *configs.Cgroup) error
+}
+
+type Manager struct {
+ mu sync.Mutex
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+// The absolute path to the root of the cgroup hierarchies.
+var cgroupRootLock sync.Mutex
+var cgroupRoot string
+
+// Gets the cgroupRoot.
+func getCgroupRoot() (string, error) {
+ cgroupRootLock.Lock()
+ defer cgroupRootLock.Unlock()
+
+ if cgroupRoot != "" {
+ return cgroupRoot, nil
+ }
+
+ root, err := cgroups.FindCgroupMountpointDir()
+ if err != nil {
+ return "", err
+ }
+
+ if _, err := os.Stat(root); err != nil {
+ return "", err
+ }
+
+ cgroupRoot = root
+ return cgroupRoot, nil
+}
+
+type cgroupData struct {
+ root string
+ parent string
+ name string
+ config *configs.Cgroup
+ pid int
+}
+
+func (m *Manager) Apply(pid int) (err error) {
+ if m.Cgroups == nil {
+ return nil
+ }
+
+ var c = m.Cgroups
+
+ d, err := getCgroupData(m.Cgroups, pid)
+ if err != nil {
+ return err
+ }
+
+ if c.Paths != nil {
+ paths := make(map[string]string)
+ for name, path := range c.Paths {
+ _, err := d.path(name)
+ if err != nil {
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[name] = path
+ }
+ m.Paths = paths
+ return cgroups.EnterPid(m.Paths, pid)
+ }
+
+ paths := make(map[string]string)
+ defer func() {
+ if err != nil {
+ cgroups.RemovePaths(paths)
+ }
+ }()
+ for _, sys := range subsystems {
+ if err := sys.Apply(d); err != nil {
+ return err
+ }
+ // TODO: Apply should, ideally, be reentrant or be broken up into a separate
+ // create and join phase, so that the cgroup hierarchy for a container can be
+ // created first, and the join then consists of writing the process pids to cgroup.procs.
+ p, err := d.path(sys.Name())
+ if err != nil {
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[sys.Name()] = p
+ }
+ m.Paths = paths
+ return nil
+}
+
+func (m *Manager) Destroy() error {
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if err := cgroups.RemovePaths(m.Paths); err != nil {
+ return err
+ }
+ m.Paths = make(map[string]string)
+ return nil
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ m.mu.Lock()
+ paths := m.Paths
+ m.mu.Unlock()
+ return paths
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ stats := cgroups.NewStats()
+ for name, path := range m.Paths {
+ sys, err := subsystems.Get(name)
+ if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
+ continue
+ }
+ if err := sys.GetStats(path, stats); err != nil {
+ return nil, err
+ }
+ }
+ return stats, nil
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+ for _, sys := range subsystems {
+ // Generate fake cgroup data.
+ d, err := getCgroupData(container.Cgroups, -1)
+ if err != nil {
+ return err
+ }
+ // Get the path, but don't error out if the cgroup wasn't found.
+ path, err := d.path(sys.Name())
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ if err := sys.Set(path, container.Cgroups); err != nil {
+ return err
+ }
+ }
+
+ if m.Paths["cpu"] != "" {
+ if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Freeze toggles the container's freezer cgroup depending on the state
+// provided
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ d, err := getCgroupData(m.Cgroups, 0)
+ if err != nil {
+ return err
+ }
+ dir, err := d.path("freezer")
+ if err != nil {
+ return err
+ }
+ prevState := m.Cgroups.Resources.Freezer
+ m.Cgroups.Resources.Freezer = state
+ freezer, err := subsystems.Get("freezer")
+ if err != nil {
+ return err
+ }
+ err = freezer.Set(dir, m.Cgroups)
+ if err != nil {
+ m.Cgroups.Resources.Freezer = prevState
+ return err
+ }
+ return nil
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ dir, err := getCgroupPath(m.Cgroups)
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetPids(dir)
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ dir, err := getCgroupPath(m.Cgroups)
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetAllPids(dir)
+}
+
+func getCgroupPath(c *configs.Cgroup) (string, error) {
+ d, err := getCgroupData(c, 0)
+ if err != nil {
+ return "", err
+ }
+
+ return d.path("devices")
+}
+
+// pathClean makes a path safe for use with filepath.Join. This is done by not
+// only cleaning the path, but also (if the path is relative) adding a leading
+// '/' and cleaning it (then removing the leading '/'). This ensures that a
+// path resulting from prepending another path will always resolve to lexically
+// be a subdirectory of the prefixed path. This is all done lexically, so paths
+// that include symlinks won't be safe as a result of using pathClean.
+func pathClean(path string) string {
+ // Ensure that all paths are cleaned (especially problematic ones like
+ // "/../../../../../" which can cause lots of issues).
+ path = filepath.Clean(path)
+
+ // If the path isn't absolute, we need to do more processing to fix paths
+ // such as "../../../..//some/path". We also shouldn't convert absolute
+ // paths to relative ones.
+ if !filepath.IsAbs(path) {
+ path = filepath.Clean(string(os.PathSeparator) + path)
+ // This can't fail, as (by definition) all paths are relative to root.
+ path, _ = filepath.Rel(string(os.PathSeparator), path)
+ }
+
+ // Clean the path again for good measure.
+ return filepath.Clean(path)
+}
+
+func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
+ root, err := getCgroupRoot()
+ if err != nil {
+ return nil, err
+ }
+
+ // Clean the parent slice path.
+ c.Parent = pathClean(c.Parent)
+
+ return &cgroupData{
+ root: root,
+ parent: c.Parent,
+ name: c.Name,
+ config: c,
+ pid: pid,
+ }, nil
+}
+
+func (raw *cgroupData) parentPath(subsystem, mountpoint, root string) (string, error) {
+ // Use GetThisCgroupDir instead of GetInitCgroupDir, because the creating
+ // process could be in a container and share a pid namespace with the host, and
+ // /proc/1/cgroup could point to a whole other world of cgroups.
+ initPath, err := cgroups.GetThisCgroupDir(subsystem)
+ if err != nil {
+ return "", err
+ }
+ // This is needed for nested containers, because in /proc/self/cgroup we
+ // see paths from the host, which don't exist in the container.
+ relDir, err := filepath.Rel(root, initPath)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(mountpoint, relDir), nil
+}
+
+func (raw *cgroupData) path(subsystem string) (string, error) {
+ mnt, root, err := cgroups.FindCgroupMountpointAndRoot(subsystem)
+ // If the subsystem is not mounted, there is no point in building the path.
+ if err != nil {
+ return "", err
+ }
+
+ cgPath := filepath.Join(raw.parent, raw.name)
+ // If the cgroup name/path is absolute, do not look relative to the cgroup of the init process.
+ if filepath.IsAbs(cgPath) {
+ // Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
+ return filepath.Join(raw.root, filepath.Base(mnt), cgPath), nil
+ }
+
+ parentPath, err := raw.parentPath(subsystem, mnt, root)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(parentPath, cgPath), nil
+}
+
+func (raw *cgroupData) join(subsystem string) (string, error) {
+ path, err := raw.path(subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return "", err
+ }
+ if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil {
+ return "", err
+ }
+ return path, nil
+}
+
+func writeFile(dir, file, data string) error {
+ // Normally dir should not be empty; one case where it can be is when the
+ // cgroup subsystem is not mounted, and we want to fail here in that case.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s.", file)
+ }
+ return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
+}
+
+func readFile(dir, file string) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join(dir, file))
+ return string(data), err
+}
+
+func removePath(p string, err error) error {
+ if err != nil {
+ return err
+ }
+ if p != "" {
+ return os.RemoveAll(p)
+ }
+ return nil
+}
+
+func CheckCpushares(path string, c int64) error {
+ var cpuShares int64
+
+ if c == 0 {
+ return nil
+ }
+
+ fd, err := os.Open(filepath.Join(path, "cpu.shares"))
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+
+ _, err = fmt.Fscanf(fd, "%d", &cpuShares)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ if c > cpuShares {
+ return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
+ } else if c < cpuShares {
+ return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
new file mode 100644
index 0000000..a142cb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go
@@ -0,0 +1,237 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type BlkioGroup struct {
+}
+
+func (s *BlkioGroup) Name() string {
+ return "blkio"
+}
+
+func (s *BlkioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("blkio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.BlkioWeight != 0 {
+ if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil {
+ return err
+ }
+ }
+
+ if cgroup.Resources.BlkioLeafWeight != 0 {
+ if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil {
+ return err
+ }
+ }
+ for _, wd := range cgroup.Resources.BlkioWeightDevice {
+ if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
+ return err
+ }
+ if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice {
+ if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteBpsDevice {
+ if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+ for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice {
+ if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *BlkioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("blkio"))
+}
+
+/*
+examples:
+
+ blkio.sectors
+ 8:0 6792
+
+ blkio.io_service_bytes
+ 8:0 Read 1282048
+ 8:0 Write 2195456
+ 8:0 Sync 2195456
+ 8:0 Async 1282048
+ 8:0 Total 3477504
+ Total 3477504
+
+ blkio.io_serviced
+ 8:0 Read 124
+ 8:0 Write 104
+ 8:0 Sync 104
+ 8:0 Async 124
+ 8:0 Total 228
+ Total 228
+
+ blkio.io_queued
+ 8:0 Read 0
+ 8:0 Write 0
+ 8:0 Sync 0
+ 8:0 Async 0
+ 8:0 Total 0
+ Total 0
+*/
+
+func splitBlkioStatLine(r rune) bool {
+ return r == ' ' || r == ':'
+}
+
+func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) {
+ var blkioStats []cgroups.BlkioStatEntry
+ f, err := os.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return blkioStats, nil
+ }
+ return nil, err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ // format: dev type amount
+ fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
+ if len(fields) < 3 {
+ if len(fields) == 2 && fields[0] == "Total" {
+ // skip total line
+ continue
+ } else {
+ return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
+ }
+ }
+
+ v, err := strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ major := v
+
+ v, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ minor := v
+
+ op := ""
+ valueField := 2
+ if len(fields) == 4 {
+ op = fields[2]
+ valueField = 3
+ }
+ v, err = strconv.ParseUint(fields[valueField], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v})
+ }
+
+ return blkioStats, nil
+}
+
+func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ // Try to read CFQ stats available on all CFQ enabled kernels first
+ if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil {
+ return getCFQStats(path, stats)
+ }
+ return getStats(path, stats) // Use generic stats as fallback
+}
+
+func getCFQStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.SectorsRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoQueuedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoWaitTimeRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoMergedRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoTimeRecursive = blkioStats
+
+ return nil
+}
+
+func getStats(path string, stats *cgroups.Stats) error {
+ var blkioStats []cgroups.BlkioStatEntry
+ var err error
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServiceBytesRecursive = blkioStats
+
+ if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil {
+ return err
+ }
+ stats.BlkioStats.IoServicedRecursive = blkioStats
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
new file mode 100644
index 0000000..a4ef28a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go
@@ -0,0 +1,94 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type CpuGroup struct {
+}
+
+func (s *CpuGroup) Name() string {
+ return "cpu"
+}
+
+func (s *CpuGroup) Apply(d *cgroupData) error {
+ // We always want to join the cpu group, to allow fair cpu scheduling
+ // on a container basis
+ _, err := d.join("cpu")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpuShares != 0 {
+ if err := writeFile(path, "cpu.shares", strconv.FormatInt(cgroup.Resources.CpuShares, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuPeriod != 0 {
+ if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(cgroup.Resources.CpuPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuQuota != 0 {
+ if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuRtPeriod != 0 {
+ if err := writeFile(path, "cpu.rt_period_us", strconv.FormatInt(cgroup.Resources.CpuRtPeriod, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpuRtRuntime != 0 {
+ if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *CpuGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpu"))
+}
+
+func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
+ f, err := os.Open(filepath.Join(path, "cpu.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer f.Close()
+
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return err
+ }
+ switch t {
+ case "nr_periods":
+ stats.CpuStats.ThrottlingData.Periods = v
+
+ case "nr_throttled":
+ stats.CpuStats.ThrottlingData.ThrottledPeriods = v
+
+ case "throttled_time":
+ stats.CpuStats.ThrottlingData.ThrottledTime = v
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
new file mode 100644
index 0000000..53afbad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go
@@ -0,0 +1,121 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+const (
+ cgroupCpuacctStat = "cpuacct.stat"
+ nanosecondsInSecond = 1000000000
+)
+
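+// cpuacct.stat reports user and system times in USER_HZ ticks; clockTicks is
+// used below to convert those ticks into nanoseconds.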
+var clockTicks = uint64(system.GetClockTicks())
+
+type CpuacctGroup struct {
+}
+
+func (s *CpuacctGroup) Name() string {
+ return "cpuacct"
+}
+
+func (s *CpuacctGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *CpuacctGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuacct"))
+}
+
+func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
+ userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path)
+ if err != nil {
+ return err
+ }
+
+ totalUsage, err := getCgroupParamUint(path, "cpuacct.usage")
+ if err != nil {
+ return err
+ }
+
+ percpuUsage, err := getPercpuUsage(path)
+ if err != nil {
+ return err
+ }
+
+ stats.CpuStats.CpuUsage.TotalUsage = totalUsage
+ stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage
+ stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage
+ stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage
+ return nil
+}
+
+// Returns user and kernel usage breakdown in nanoseconds.
+func getCpuUsageBreakdown(path string) (uint64, uint64, error) {
+ userModeUsage := uint64(0)
+ kernelModeUsage := uint64(0)
+ const (
+ userField = "user"
+ systemField = "system"
+ )
+
+	// Expected format:
+	// user <usage in ticks>
+	// system <usage in ticks>
+ data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat))
+ if err != nil {
+ return 0, 0, err
+ }
+ fields := strings.Fields(string(data))
+ if len(fields) != 4 {
+ return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat))
+ }
+ if fields[0] != userField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField)
+ }
+ if fields[2] != systemField {
+ return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField)
+ }
+ if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
+ return 0, 0, err
+ }
+ if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil {
+ return 0, 0, err
+ }
+
+ return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil
+}
+
+func getPercpuUsage(path string) ([]uint64, error) {
+ percpuUsage := []uint64{}
+ data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
+ if err != nil {
+ return percpuUsage, err
+ }
+ for _, value := range strings.Fields(string(data)) {
+ value, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err)
+ }
+ percpuUsage = append(percpuUsage, value)
+ }
+ return percpuUsage, nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
new file mode 100644
index 0000000..ed10023
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
@@ -0,0 +1,138 @@
+// +build linux
+
+package fs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type CpusetGroup struct {
+}
+
+func (s *CpusetGroup) Name() string {
+ return "cpuset"
+}
+
+func (s *CpusetGroup) Apply(d *cgroupData) error {
+ dir, err := d.path("cpuset")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return s.ApplyDir(dir, d.config, d.pid)
+}
+
+func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.CpusetCpus != "" {
+ if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.CpusetMems != "" {
+ if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("cpuset"))
+}
+
+func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
+
+func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
+ // This might happen if we have no cpuset cgroup mounted.
+ // Just do nothing and don't fail.
+ if dir == "" {
+ return nil
+ }
+ root, err := getCgroupRoot()
+ if err != nil {
+ return err
+ }
+ if err := s.ensureParent(dir, root); err != nil {
+ return err
+ }
+	// Because we are not using d.join, we need to place the pid into the
+	// procs file ourselves, unlike with the other subsystems.
+ if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
+ if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
+ return
+ }
+ if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil {
+ return
+ }
+ return cpus, mems, nil
+}
+
+// ensureParent makes sure that the parent directory of current is created
+// and populated with the proper cpus and mems files copied from
+// its parent.
+func (s *CpusetGroup) ensureParent(current, root string) error {
+ parent := filepath.Dir(current)
+ if filepath.Clean(parent) == root {
+ return nil
+ }
+ // Avoid infinite recursion.
+ if parent == current {
+ return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
+ }
+ if err := s.ensureParent(parent, root); err != nil {
+ return err
+ }
+ if err := os.MkdirAll(current, 0755); err != nil {
+ return err
+ }
+ return s.copyIfNeeded(current, parent)
+}
+
+// copyIfNeeded copies cpuset.cpus and cpuset.mems from the parent
+// directory to the current directory if the current files are empty.
+func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
+ var (
+ err error
+ currentCpus, currentMems []byte
+ parentCpus, parentMems []byte
+ )
+
+ if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil {
+ return err
+ }
+ if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil {
+ return err
+ }
+
+ if s.isEmpty(currentCpus) {
+ if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil {
+ return err
+ }
+ }
+ if s.isEmpty(currentMems) {
+ if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *CpusetGroup) isEmpty(b []byte) bool {
+ return len(bytes.Trim(b, "\n")) == 0
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
new file mode 100644
index 0000000..a41ce80
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go
@@ -0,0 +1,60 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type DevicesGroup struct {
+}
+
+func (s *DevicesGroup) Name() string {
+ return "devices"
+}
+
+func (s *DevicesGroup) Apply(d *cgroupData) error {
+ _, err := d.join("devices")
+ if err != nil {
+		// We return the error even when it is a `not found` error, because
+		// the devices cgroup is a hard requirement for container security.
+ return err
+ }
+ return nil
+}
+
+func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if !cgroup.Resources.AllowAllDevices {
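+		// Deny access to all devices first, then selectively allow the configured ones.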
+ if err := writeFile(path, "devices.deny", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.Resources.AllowedDevices {
+ if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ if err := writeFile(path, "devices.allow", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.Resources.DeniedDevices {
+ if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *DevicesGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("devices"))
+}
+
+func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
new file mode 100644
index 0000000..e70dfe3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go
@@ -0,0 +1,61 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type FreezerGroup struct {
+}
+
+func (s *FreezerGroup) Name() string {
+ return "freezer"
+}
+
+func (s *FreezerGroup) Apply(d *cgroupData) error {
+ _, err := d.join("freezer")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error {
+ switch cgroup.Resources.Freezer {
+ case configs.Frozen, configs.Thawed:
+ if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil {
+ return err
+ }
+
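+		// The kernel applies the state change asynchronously (freezer.state may
+		// read FREEZING while tasks are being frozen), so poll until the
+		// requested state is reached.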
+ for {
+ state, err := readFile(path, "freezer.state")
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) {
+ break
+ }
+ time.Sleep(1 * time.Millisecond)
+ }
+ case configs.Undefined:
+ return nil
+ default:
+ return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer))
+ }
+
+ return nil
+}
+
+func (s *FreezerGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("freezer"))
+}
+
+func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
new file mode 100644
index 0000000..3ef9e03
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package fs
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
new file mode 100644
index 0000000..2f97277
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go
@@ -0,0 +1,71 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type HugetlbGroup struct {
+}
+
+func (s *HugetlbGroup) Name() string {
+ return "hugetlb"
+}
+
+func (s *HugetlbGroup) Apply(d *cgroupData) error {
+ _, err := d.join("hugetlb")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, hugetlb := range cgroup.Resources.HugetlbLimit {
+ if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *HugetlbGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("hugetlb"))
+}
+
+func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
+ hugetlbStats := cgroups.HugetlbStats{}
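+	// Stat files are named "hugetlb.<pagesize>.<stat>", e.g. "hugetlb.2MB.usage_in_bytes".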
+ for _, pageSize := range HugePageSizes {
+ usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".")
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ hugetlbStats.Usage = value
+
+ maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".")
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ hugetlbStats.MaxUsage = value
+
+ failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".")
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ return fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ hugetlbStats.Failcnt = value
+
+ stats.HugetlbStats[pageSize] = hugetlbStats
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
new file mode 100644
index 0000000..8c3e963
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
@@ -0,0 +1,192 @@
+// +build linux
+
+package fs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type MemoryGroup struct {
+}
+
+func (s *MemoryGroup) Name() string {
+ return "memory"
+}
+
+func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
+ path, err := d.path("memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ if memoryAssigned(d.config) {
+ if path != "" {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ // We have to set kernel memory here, as we can't change it once
+ // processes have been attached.
+ if err := s.SetKernelMemory(path, d.config); err != nil {
+ return err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ os.RemoveAll(path)
+ }
+ }()
+
+	// We need to join the memory cgroup after setting the memory limits, because
+	// kmem.limit_in_bytes can only be set when the cgroup is empty.
+ _, err = d.join("memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *MemoryGroup) SetKernelMemory(path string, cgroup *configs.Cgroup) error {
+ // This has to be done separately because it has special constraints (it
+ // can't be done after there are processes attached to the cgroup).
+ if cgroup.Resources.KernelMemory > 0 {
+ if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.Memory != 0 {
+ if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemoryReservation != 0 {
+ if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwap > 0 {
+ if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.OomKillDisable {
+ if err := writeFile(path, "memory.oom_control", "1"); err != nil {
+ return err
+ }
+ }
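+	// memory.swappiness accepts values in [0, 100]; a value of -1 means
+	// "keep the kernel default", in which case nothing is written.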
+ if cgroup.Resources.MemorySwappiness >= 0 && cgroup.Resources.MemorySwappiness <= 100 {
+ if err := writeFile(path, "memory.swappiness", strconv.FormatInt(cgroup.Resources.MemorySwappiness, 10)); err != nil {
+ return err
+ }
+ } else if cgroup.Resources.MemorySwappiness == -1 {
+ return nil
+ } else {
+		return fmt.Errorf("invalid value: %d; valid memory swappiness range is 0-100", cgroup.Resources.MemorySwappiness)
+ }
+
+ return nil
+}
+
+func (s *MemoryGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("memory"))
+}
+
+func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
+ // Set stats from memory.stat.
+ statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ defer statsFile.Close()
+
+ sc := bufio.NewScanner(statsFile)
+ for sc.Scan() {
+ t, v, err := getCgroupParamKeyValue(sc.Text())
+ if err != nil {
+ return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err)
+ }
+ stats.MemoryStats.Stats[t] = v
+ }
+ stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
+
+ memoryUsage, err := getMemoryData(path, "")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.Usage = memoryUsage
+ swapUsage, err := getMemoryData(path, "memsw")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.SwapUsage = swapUsage
+ kernelUsage, err := getMemoryData(path, "kmem")
+ if err != nil {
+ return err
+ }
+ stats.MemoryStats.KernelUsage = kernelUsage
+
+ return nil
+}
+
+func memoryAssigned(cgroup *configs.Cgroup) bool {
+ return cgroup.Resources.Memory != 0 ||
+ cgroup.Resources.MemoryReservation != 0 ||
+ cgroup.Resources.MemorySwap > 0 ||
+ cgroup.Resources.KernelMemory > 0 ||
+ cgroup.Resources.OomKillDisable ||
+ cgroup.Resources.MemorySwappiness != -1
+}
+
+func getMemoryData(path, name string) (cgroups.MemoryData, error) {
+ memoryData := cgroups.MemoryData{}
+
+ moduleName := "memory"
+ if name != "" {
+ moduleName = strings.Join([]string{"memory", name}, ".")
+ }
+ usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
+ maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
+ failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
+
+ value, err := getCgroupParamUint(path, usage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err)
+ }
+ memoryData.Usage = value
+ value, err = getCgroupParamUint(path, maxUsage)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err)
+ }
+ memoryData.MaxUsage = value
+ value, err = getCgroupParamUint(path, failcnt)
+ if err != nil {
+ if moduleName != "memory" && os.IsNotExist(err) {
+ return cgroups.MemoryData{}, nil
+ }
+ return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
+ }
+ memoryData.Failcnt = value
+
+ return memoryData, nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
new file mode 100644
index 0000000..0e423f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go
@@ -0,0 +1,32 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NameGroup struct {
+ GroupName string
+}
+
+func (s *NameGroup) Name() string {
+ return s.GroupName
+}
+
+func (s *NameGroup) Apply(d *cgroupData) error {
+ return nil
+}
+
+func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *NameGroup) Remove(d *cgroupData) error {
+ return nil
+}
+
+func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
new file mode 100644
index 0000000..8a4054b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go
@@ -0,0 +1,41 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetClsGroup struct {
+}
+
+func (s *NetClsGroup) Name() string {
+ return "net_cls"
+}
+
+func (s *NetClsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_cls")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.NetClsClassid != "" {
+ if err := writeFile(path, "net_cls.classid", cgroup.Resources.NetClsClassid); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetClsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_cls"))
+}
+
+func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
new file mode 100644
index 0000000..d0ab2af
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go
@@ -0,0 +1,41 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type NetPrioGroup struct {
+}
+
+func (s *NetPrioGroup) Name() string {
+ return "net_prio"
+}
+
+func (s *NetPrioGroup) Apply(d *cgroupData) error {
+ _, err := d.join("net_prio")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error {
+ for _, prioMap := range cgroup.Resources.NetPrioIfpriomap {
+ if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *NetPrioGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("net_prio"))
+}
+
+func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
new file mode 100644
index 0000000..5693676
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go
@@ -0,0 +1,35 @@
+// +build linux
+
+package fs
+
+import (
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PerfEventGroup struct {
+}
+
+func (s *PerfEventGroup) Name() string {
+ return "perf_event"
+}
+
+func (s *PerfEventGroup) Apply(d *cgroupData) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *PerfEventGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("perf_event"))
+}
+
+func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
new file mode 100644
index 0000000..96cbb89
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go
@@ -0,0 +1,57 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type PidsGroup struct {
+}
+
+func (s *PidsGroup) Name() string {
+ return "pids"
+}
+
+func (s *PidsGroup) Apply(d *cgroupData) error {
+ _, err := d.join("pids")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
+ if cgroup.Resources.PidsLimit != 0 {
+		// "max" is the fallback value; a negative PidsLimit requests an
+		// unlimited number of pids.
+ limit := "max"
+
+ if cgroup.Resources.PidsLimit > 0 {
+ limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
+ }
+
+ if err := writeFile(path, "pids.max", limit); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *PidsGroup) Remove(d *cgroupData) error {
+ return removePath(d.path("pids"))
+}
+
+func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
+ value, err := getCgroupParamUint(path, "pids.current")
+ if err != nil {
+ return fmt.Errorf("failed to parse pids.current - %s", err)
+ }
+
+ stats.PidsStats.Current = value
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
new file mode 100644
index 0000000..852b183
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go
@@ -0,0 +1,79 @@
+// +build linux
+
+package fs
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+var (
+ ErrNotSupportStat = errors.New("stats are not supported for subsystem")
+ ErrNotValidFormat = errors.New("line is not a valid key value format")
+)
+
+// parseUint parses s as a uint64, saturating negative values at zero.
+// Due to kernel bugs, some of the memory cgroup stats can be negative.
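+// For example, parseUint("-1", 10, 64) returns 0 rather than an error.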
+func parseUint(s string, base, bitSize int) (uint64, error) {
+ value, err := strconv.ParseUint(s, base, bitSize)
+ if err != nil {
+ intValue, intErr := strconv.ParseInt(s, base, bitSize)
+		// 1. Handle negative values no smaller than MinInt64, and
+		// 2. Handle negative values smaller than MinInt64, for which ParseInt
+		//    reports ErrRange while still returning a negative value.
+ if intErr == nil && intValue < 0 {
+ return 0, nil
+ } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
+ return 0, nil
+ }
+
+ return value, err
+ }
+
+ return value, nil
+}
+
+// Parses a cgroup param and returns as name, value
+// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234
+func getCgroupParamKeyValue(t string) (string, uint64, error) {
+ parts := strings.Fields(t)
+ switch len(parts) {
+ case 2:
+ value, err := parseUint(parts[1], 10, 64)
+ if err != nil {
+ return "", 0, fmt.Errorf("unable to convert param value (%q) to uint64: %v", parts[1], err)
+ }
+
+ return parts[0], value, nil
+ default:
+ return "", 0, ErrNotValidFormat
+ }
+}
+
+// Gets a single uint64 value from the specified cgroup file.
+func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) {
+ fileName := filepath.Join(cgroupPath, cgroupFile)
+ contents, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return 0, err
+ }
+
+ res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName)
+ }
+ return res, nil
+}
+
+// Gets a string value from the specified cgroup file
+func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) {
+ contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(contents)), nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
new file mode 100644
index 0000000..74c65ab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
@@ -0,0 +1,98 @@
+// +build linux
+
+package cgroups
+
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods,omitempty"`
+ // Number of periods when the container hit its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+}
+
+// All CPU stats are aggregate since container inception.
+type CpuUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds.
+ TotalUsage uint64 `json:"total_usage,omitempty"`
+ // Total CPU time consumed per core.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+ // Time spent by tasks of the cgroup in kernel mode.
+ // Units: nanoseconds.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+ // Time spent by tasks of the cgroup in user mode.
+ // Units: nanoseconds.
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+ CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryData struct {
+ Usage uint64 `json:"usage,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ Failcnt uint64 `json:"failcnt"`
+}
+type MemoryStats struct {
+ // memory used for cache
+ Cache uint64 `json:"cache,omitempty"`
+ // usage of memory
+ Usage MemoryData `json:"usage,omitempty"`
+ // usage of memory + swap
+ SwapUsage MemoryData `json:"swap_usage,omitempty"`
+	// usage of kernel memory
+ KernelUsage MemoryData `json:"kernel_usage,omitempty"`
+ Stats map[string]uint64 `json:"stats,omitempty"`
+}
+
+type PidsStats struct {
+ // number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+}
+
+type BlkioStatEntry struct {
+ Major uint64 `json:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty"`
+ Op string `json:"op,omitempty"`
+ Value uint64 `json:"value,omitempty"`
+}
+
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"`
+}
+
+type HugetlbStats struct {
+ // current res_counter usage for hugetlb
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+	// number of times hugetlb usage allocations have failed.
+ Failcnt uint64 `json:"failcnt"`
+}
+
+type Stats struct {
+ CpuStats CpuStats `json:"cpu_stats,omitempty"`
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+ // the map is in the format "size of hugepage: stats of the hugepage"
+ HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
+}
+
+func NewStats() *Stats {
+ memoryStats := MemoryStats{Stats: make(map[string]uint64)}
+ hugetlbStats := make(map[string]HugetlbStats)
+ return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go
new file mode 100644
index 0000000..7de9ae6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go
@@ -0,0 +1,55 @@
+// +build !linux
+
+package systemd
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager struct {
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+func UseSystemd() bool {
+ return false
+}
+
+func (m *Manager) Apply(pid int) error {
+ return fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ return nil, fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ return nil, fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) Destroy() error {
+ return fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ return nil
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ return nil, fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+	return fmt.Errorf("Systemd not supported")
+}
+
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ return fmt.Errorf("Systemd not supported")
+}
+
+func Freeze(c *configs.Cgroup, state configs.FreezerState) error {
+ return fmt.Errorf("Systemd not supported")
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go
new file mode 100644
index 0000000..db020a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go
@@ -0,0 +1,596 @@
+// +build linux
+
+package systemd
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ systemdDbus "github.com/coreos/go-systemd/dbus"
+ systemdUtil "github.com/coreos/go-systemd/util"
+ "github.com/godbus/dbus"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager struct {
+ mu sync.Mutex
+ Cgroups *configs.Cgroup
+ Paths map[string]string
+}
+
+type subsystem interface {
+ // Name returns the name of the subsystem.
+ Name() string
+ // Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+ GetStats(path string, stats *cgroups.Stats) error
+ // Set the cgroup represented by cgroup.
+ Set(path string, cgroup *configs.Cgroup) error
+}
+
+var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
+
+type subsystemSet []subsystem
+
+func (s subsystemSet) Get(name string) (subsystem, error) {
+ for _, ss := range s {
+ if ss.Name() == name {
+ return ss, nil
+ }
+ }
+ return nil, errSubsystemDoesNotExist
+}
+
+var subsystems = subsystemSet{
+ &fs.CpusetGroup{},
+ &fs.DevicesGroup{},
+ &fs.MemoryGroup{},
+ &fs.CpuGroup{},
+ &fs.CpuacctGroup{},
+ &fs.PidsGroup{},
+ &fs.BlkioGroup{},
+ &fs.HugetlbGroup{},
+ &fs.PerfEventGroup{},
+ &fs.FreezerGroup{},
+ &fs.NetPrioGroup{},
+ &fs.NetClsGroup{},
+ &fs.NameGroup{GroupName: "name=systemd"},
+}
+
+const (
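+	// testScopeWait bounds the retry loop used when probing the systemd test scope.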
+ testScopeWait = 4
+)
+
+var (
+ connLock sync.Mutex
+ theConn *systemdDbus.Conn
+ hasStartTransientUnit bool
+ hasTransientDefaultDependencies bool
+)
+
+func newProp(name string, units interface{}) systemdDbus.Property {
+ return systemdDbus.Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+func UseSystemd() bool {
+ if !systemdUtil.IsRunningSystemd() {
+ return false
+ }
+
+ connLock.Lock()
+ defer connLock.Unlock()
+
+ if theConn == nil {
+ var err error
+ theConn, err = systemdDbus.New()
+ if err != nil {
+ return false
+ }
+
+ // Assume we have StartTransientUnit
+ hasStartTransientUnit = true
+
+ // But if we get UnknownMethod error we don't
+ if _, err := theConn.StartTransientUnit("test.scope", "invalid", nil, nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
+ hasStartTransientUnit = false
+ return hasStartTransientUnit
+ }
+ }
+ }
+
+ // Ensure the scope name we use doesn't exist. Use the Pid to
+ // avoid collisions between multiple libcontainer users on a
+ // single host.
+ scope := fmt.Sprintf("libcontainer-%d-systemd-test-default-dependencies.scope", os.Getpid())
+ testScopeExists := true
+ for i := 0; i <= testScopeWait; i++ {
+ if _, err := theConn.StopUnit(scope, "replace", nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if strings.Contains(dbusError.Name, "org.freedesktop.systemd1.NoSuchUnit") {
+ testScopeExists = false
+ break
+ }
+ }
+ }
+ time.Sleep(time.Millisecond)
+ }
+
+ // Bail out if we can't kill this scope without testing for DefaultDependencies
+ if testScopeExists {
+ return hasStartTransientUnit
+ }
+
+ // Assume StartTransientUnit on a scope allows DefaultDependencies
+ hasTransientDefaultDependencies = true
+ ddf := newProp("DefaultDependencies", false)
+ if _, err := theConn.StartTransientUnit(scope, "replace", []systemdDbus.Property{ddf}, nil); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") {
+ hasTransientDefaultDependencies = false
+ }
+ }
+ }
+
+ // Not critical because of the stop unit logic above.
+ theConn.StopUnit(scope, "replace", nil)
+ }
+ return hasStartTransientUnit
+}
+
+func getIfaceForUnit(unitName string) string {
+ if strings.HasSuffix(unitName, ".scope") {
+ return "Scope"
+ }
+ if strings.HasSuffix(unitName, ".service") {
+ return "Service"
+ }
+ return "Unit"
+}
+
+func (m *Manager) Apply(pid int) error {
+ var (
+ c = m.Cgroups
+ unitName = getUnitName(c)
+ slice = "system.slice"
+ properties []systemdDbus.Property
+ )
+
+ if c.Paths != nil {
+ paths := make(map[string]string)
+ for name, path := range c.Paths {
+ _, err := getSubsystemPath(m.Cgroups, name)
+ if err != nil {
+ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[name] = path
+ }
+ m.Paths = paths
+ return cgroups.EnterPid(m.Paths, pid)
+ }
+
+ if c.Parent != "" {
+ slice = c.Parent
+ }
+
+ properties = append(properties,
+ systemdDbus.PropSlice(slice),
+ systemdDbus.PropDescription("docker container "+c.Name),
+ newProp("PIDs", []uint32{uint32(pid)}),
+ )
+
+	// Always enable accounting; this gets us the same behaviour as the fs
+	// implementation, and the kernel has some problems with joining the memory
+	// cgroup at a later time.
+ properties = append(properties,
+ newProp("MemoryAccounting", true),
+ newProp("CPUAccounting", true),
+ newProp("BlockIOAccounting", true))
+
+ if hasTransientDefaultDependencies {
+ properties = append(properties,
+ newProp("DefaultDependencies", false))
+ }
+
+ if c.Resources.Memory != 0 {
+ properties = append(properties,
+ newProp("MemoryLimit", uint64(c.Resources.Memory)))
+ }
+
+ if c.Resources.CpuShares != 0 {
+ properties = append(properties,
+ newProp("CPUShares", uint64(c.Resources.CpuShares)))
+ }
+
+ if c.Resources.BlkioWeight != 0 {
+ properties = append(properties,
+ newProp("BlockIOWeight", uint64(c.Resources.BlkioWeight)))
+ }
+
+	// We need to set kernel memory before processes join the cgroup, because
+	// kmem.limit_in_bytes can only be set when the cgroup is empty. The swap
+	// limit must be set after the memory limit, and since only the memory
+	// limit is handled by systemd, it's kind of ugly here.
+ if c.Resources.KernelMemory > 0 {
+ if err := setKernelMemory(c); err != nil {
+ return err
+ }
+ }
+
+ if _, err := theConn.StartTransientUnit(unitName, "replace", properties, nil); err != nil {
+ return err
+ }
+
+ if err := joinDevices(c, pid); err != nil {
+ return err
+ }
+
+ // TODO: CpuQuota and CpuPeriod not available in systemd
+ // we need to manually join the cpu.cfs_quota_us and cpu.cfs_period_us
+ if err := joinCpu(c, pid); err != nil {
+ return err
+ }
+
+ // TODO: MemoryReservation and MemorySwap not available in systemd
+ if err := joinMemory(c, pid); err != nil {
+ return err
+ }
+
+	// We need to manually join the freezer, net_cls, net_prio, pids and cpuset
+	// cgroups, because systemd does not currently support them via the dbus API.
+ if err := joinFreezer(c, pid); err != nil {
+ return err
+ }
+
+ if err := joinNetPrio(c, pid); err != nil {
+ return err
+ }
+ if err := joinNetCls(c, pid); err != nil {
+ return err
+ }
+
+ if err := joinPids(c, pid); err != nil {
+ return err
+ }
+
+ if err := joinCpuset(c, pid); err != nil {
+ return err
+ }
+
+ if err := joinHugetlb(c, pid); err != nil {
+ return err
+ }
+
+ if err := joinPerfEvent(c, pid); err != nil {
+ return err
+ }
+	// FIXME: Systemd does have a `BlockIODeviceWeight` property, but we had problems
+	// using it (at least on systemd 208, see https://github.com/opencontainers/runc/libcontainer/pull/354),
+	// so use the fs workaround for now.
+ if err := joinBlkio(c, pid); err != nil {
+ return err
+ }
+
+ paths := make(map[string]string)
+ for _, s := range subsystems {
+ subsystemPath, err := getSubsystemPath(m.Cgroups, s.Name())
+ if err != nil {
+ // Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+ if cgroups.IsNotFound(err) {
+ continue
+ }
+ return err
+ }
+ paths[s.Name()] = subsystemPath
+ }
+ m.Paths = paths
+ return nil
+}
+
+func (m *Manager) Destroy() error {
+ if m.Cgroups.Paths != nil {
+ return nil
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ theConn.StopUnit(getUnitName(m.Cgroups), "replace", nil)
+ if err := cgroups.RemovePaths(m.Paths); err != nil {
+ return err
+ }
+ m.Paths = make(map[string]string)
+ return nil
+}
+
+func (m *Manager) GetPaths() map[string]string {
+ m.mu.Lock()
+ paths := m.Paths
+ m.mu.Unlock()
+ return paths
+}
+
+func writeFile(dir, file, data string) error {
+	// Normally dir should not be empty; one case where it can be is when the
+	// cgroup subsystem is not mounted, and in that case we want to fail here.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s.", file)
+ }
+ return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
+}
+
+func join(c *configs.Cgroup, subsystem string, pid int) (string, error) {
+ path, err := getSubsystemPath(c, subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return "", err
+ }
+ if err := writeFile(path, "cgroup.procs", strconv.Itoa(pid)); err != nil {
+ return "", err
+ }
+
+ return path, nil
+}
+
+func joinCpu(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "cpu", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func joinFreezer(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "freezer", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func joinNetPrio(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "net_prio", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func joinNetCls(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "net_cls", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func joinPids(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "pids", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+// systemd represents slice hierarchy using `-`, so we need to follow suit when
+// generating the path of a slice. Essentially, test-a-b.slice becomes
+// test.slice/test-a.slice/test-a-b.slice.
+func expandSlice(slice string) (string, error) {
+ suffix := ".slice"
+ sliceName := strings.TrimSuffix(slice, suffix)
+
+ var path, prefix string
+ for _, component := range strings.Split(sliceName, "-") {
+ // test--a.slice isn't permitted, nor is -test.slice.
+ if component == "" {
+ return "", fmt.Errorf("invalid slice name: %s", slice)
+ }
+
+ // Append the component to the path and to the prefix.
+ path += prefix + component + suffix + "/"
+ prefix += component + "-"
+ }
+
+ return path, nil
+}
+
+func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) {
+ mountpoint, err := cgroups.FindCgroupMountpoint(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ initPath, err := cgroups.GetInitCgroupDir(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ slice := "system.slice"
+ if c.Parent != "" {
+ slice = c.Parent
+ }
+
+ slice, err = expandSlice(slice)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
+}
+
+func (m *Manager) Freeze(state configs.FreezerState) error {
+ path, err := getSubsystemPath(m.Cgroups, "freezer")
+ if err != nil {
+ return err
+ }
+ prevState := m.Cgroups.Resources.Freezer
+ m.Cgroups.Resources.Freezer = state
+ freezer, err := subsystems.Get("freezer")
+ if err != nil {
+ return err
+ }
+ err = freezer.Set(path, m.Cgroups)
+ if err != nil {
+ m.Cgroups.Resources.Freezer = prevState
+ return err
+ }
+ return nil
+}
+
+func (m *Manager) GetPids() ([]int, error) {
+ path, err := getSubsystemPath(m.Cgroups, "devices")
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetPids(path)
+}
+
+func (m *Manager) GetAllPids() ([]int, error) {
+ path, err := getSubsystemPath(m.Cgroups, "devices")
+ if err != nil {
+ return nil, err
+ }
+ return cgroups.GetAllPids(path)
+}
+
+func (m *Manager) GetStats() (*cgroups.Stats, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ stats := cgroups.NewStats()
+ for name, path := range m.Paths {
+ sys, err := subsystems.Get(name)
+ if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
+ continue
+ }
+ if err := sys.GetStats(path, stats); err != nil {
+ return nil, err
+ }
+ }
+
+ return stats, nil
+}
+
+func (m *Manager) Set(container *configs.Config) error {
+ for _, sys := range subsystems {
+ // Get the subsystem path, but don't error out for not found cgroups.
+ path, err := getSubsystemPath(container.Cgroups, sys.Name())
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ if err := sys.Set(path, container.Cgroups); err != nil {
+ return err
+ }
+ }
+
+ if m.Paths["cpu"] != "" {
+ if err := fs.CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func getUnitName(c *configs.Cgroup) string {
+ return fmt.Sprintf("%s-%s.scope", c.ScopePrefix, c.Name)
+}
+
+// At the moment we can't use the systemd device support because of two missing things:
+// * Support for wildcards to allow mknod on any device
+// * Support for wildcards to allow /dev/pts support
+//
+// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
+// in wide use. When both these are available we will be able to switch, but need to keep the old
+// implementation for backwards compat.
+//
+// Note: we can't use systemd to set up the initial limits, and then change the cgroup
+// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
+// This happens at least for v208 when any sibling unit is started.
+func joinDevices(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "devices", pid)
+	// Even if it's a `not found` error, we return it, because the devices
+	// cgroup is a hard requirement for container security.
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func setKernelMemory(c *configs.Cgroup) error {
+ path, err := getSubsystemPath(c, "memory")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ // This doesn't get called by manager.Set, so we need to do it here.
+ s := &fs.MemoryGroup{}
+ return s.SetKernelMemory(path, c)
+}
+
+func joinMemory(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "memory", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+// systemd does not currently set up the cpuset controller, so we must
+// manually join it. It is also a very finicky controller: every level must be
+// fully configured, since the default for a new directory is "no cpus".
+func joinCpuset(c *configs.Cgroup, pid int) error {
+ path, err := getSubsystemPath(c, "cpuset")
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+
+ s := &fs.CpusetGroup{}
+
+ return s.ApplyDir(path, c, pid)
+}
+
+// The `BlockIODeviceWeight` property of systemd does not work properly, and systemd
+// expects a device path instead of major:minor numbers, which is also confusing
+// for users. So we use the fs workaround for now.
+func joinBlkio(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "blkio", pid)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func joinHugetlb(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "hugetlb", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func joinPerfEvent(c *configs.Cgroup, pid int) error {
+ _, err := join(c, "perf_event", pid)
+ if err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
new file mode 100644
index 0000000..88620aa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -0,0 +1,361 @@
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/go-units"
+)
+
+const cgroupNamePrefix = "name="
+
+// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
+func FindCgroupMountpoint(subsystem string) (string, error) {
+	// We are not using mount.GetMounts() because it's super-inefficient;
+	// parsing mountinfo directly is roughly 10x faster, since it avoids Sscanf.
+	// It was one of the two major performance drawbacks in container start.
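+	// In mountinfo, field 5 (index 4) is the mount point, and the final field
+	// holds the super options, where the cgroup subsystem names appear.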
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Split(txt, " ")
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], nil
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
+
+func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Split(txt, " ")
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], fields[3], nil
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", "", err
+ }
+
+ return "", "", NewNotFoundError(subsystem)
+}
+
+func FindCgroupMountpointDir() (string, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ text := scanner.Text()
+ fields := strings.Split(text, " ")
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ numPostFields := len(postSeparatorFields)
+
+ // This is an error as we can't detect if the mount is for "cgroup"
+ if numPostFields == 0 {
+ return "", fmt.Errorf("Found no fields post '-' in %q", text)
+ }
+
+ if postSeparatorFields[0] == "cgroup" {
+			// Check that the mount is properly formatted.
+ if numPostFields < 3 {
+				return "", fmt.Errorf("Error: found fewer than 3 fields post '-' in %q", text)
+ }
+
+ return filepath.Dir(fields[4]), nil
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+
+ return "", NewNotFoundError("cgroup")
+}
+
+type Mount struct {
+ Mountpoint string
+ Root string
+ Subsystems []string
+}
+
+func (m Mount) GetThisCgroupDir(cgroups map[string]string) (string, error) {
+ if len(m.Subsystems) == 0 {
+ return "", fmt.Errorf("no subsystem for mount")
+ }
+
+ return getControllerPath(m.Subsystems[0], cgroups)
+}
+
+func GetCgroupMounts() ([]Mount, error) {
+ mounts, err := mount.GetMounts()
+ if err != nil {
+ return nil, err
+ }
+
+ all, err := GetAllSubsystems()
+ if err != nil {
+ return nil, err
+ }
+
+ allMap := make(map[string]bool)
+ for _, s := range all {
+ allMap[s] = true
+ }
+
+ res := []Mount{}
+ for _, mount := range mounts {
+ if mount.Fstype == "cgroup" {
+ m := Mount{Mountpoint: mount.Mountpoint, Root: mount.Root}
+
+ for _, opt := range strings.Split(mount.VfsOpts, ",") {
+ if strings.HasPrefix(opt, cgroupNamePrefix) {
+ m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):])
+ }
+ if allMap[opt] {
+ m.Subsystems = append(m.Subsystems, opt)
+ }
+ }
+ res = append(res, m)
+ }
+ }
+ return res, nil
+}
+
+// Returns all the cgroup subsystems supported by the kernel
+func GetAllSubsystems() ([]string, error) {
+ f, err := os.Open("/proc/cgroups")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ subsystems := []string{}
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ text := s.Text()
+ if text[0] != '#' {
+ parts := strings.Fields(text)
+ if len(parts) >= 4 && parts[3] != "0" {
+ subsystems = append(subsystems, parts[0])
+ }
+ }
+ }
+ return subsystems, nil
+}
+
+// Returns the relative path to the cgroup docker is running in.
+func GetThisCgroupDir(subsystem string) (string, error) {
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func GetInitCgroupDir(subsystem string) (string, error) {
+
+ cgroups, err := ParseCgroupFile("/proc/1/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func readProcsFile(dir string) ([]int, error) {
+ f, err := os.Open(filepath.Join(dir, "cgroup.procs"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var (
+ s = bufio.NewScanner(f)
+ out = []int{}
+ )
+
+ for s.Scan() {
+ if t := s.Text(); t != "" {
+ pid, err := strconv.Atoi(t)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, pid)
+ }
+ }
+ return out, nil
+}
+
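+// ParseCgroupFile parses a /proc/<pid>/cgroup-style file, in which each line
+// has the form "hierarchy-ID:subsystem[,subsystem...]:cgroup-path".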
+func ParseCgroupFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ cgroups := make(map[string]string)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ parts := strings.Split(text, ":")
+
+ for _, subs := range strings.Split(parts[1], ",") {
+ cgroups[subs] = parts[2]
+ }
+ }
+ return cgroups, nil
+}
+
+func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
+
+ if p, ok := cgroups[subsystem]; ok {
+ return p, nil
+ }
+
+ if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok {
+ return p, nil
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
+
+func PathExists(path string) bool {
+ if _, err := os.Stat(path); err != nil {
+ return false
+ }
+ return true
+}
+
+func EnterPid(cgroupPaths map[string]string, pid int) error {
+ for _, path := range cgroupPaths {
+ if PathExists(path) {
+ if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"),
+ []byte(strconv.Itoa(pid)), 0700); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// RemovePaths iterates over the provided paths, removing them.
+// We try to remove all paths five times, with an increasing delay between tries.
+// If any cgroups remain after all attempts, an appropriate error is returned.
+func RemovePaths(paths map[string]string) (err error) {
+ delay := 10 * time.Millisecond
+ for i := 0; i < 5; i++ {
+ if i != 0 {
+ time.Sleep(delay)
+ delay *= 2
+ }
+ for s, p := range paths {
+ os.RemoveAll(p)
+ // TODO: here probably should be logging
+ _, err := os.Stat(p)
+			// We need this strange way of checking cgroup existence because
+			// RemoveAll almost always returns an error, even on already-removed
+			// cgroups.
+ if os.IsNotExist(err) {
+ delete(paths, s)
+ }
+ }
+ if len(paths) == 0 {
+ return nil
+ }
+ }
+ return fmt.Errorf("Failed to remove paths: %s", paths)
+}
+
+func GetHugePageSize() ([]string, error) {
+ var pageSizes []string
+ sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"}
+ files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
+ if err != nil {
+ return pageSizes, err
+ }
+ for _, st := range files {
+ nameArray := strings.Split(st.Name(), "-")
+ pageSize, err := units.RAMInBytes(nameArray[1])
+ if err != nil {
+ return []string{}, err
+ }
+ sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)
+ pageSizes = append(pageSizes, sizeString)
+ }
+
+ return pageSizes, nil
+}
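+
+// Worked example (illustrative): a directory named "hugepages-2048kB" under
+// /sys/kernel/mm/hugepages splits on "-" into ("hugepages", "2048kB");
+// units.RAMInBytes("2048kB") gives 2097152 bytes, which units.CustomSize
+// renders back as "2MB", the size string used in hugetlb cgroup file names.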
+
+// GetPids returns all pids that were added to the cgroup at path.
+func GetPids(path string) ([]int, error) {
+ return readProcsFile(path)
+}
+
+// GetAllPids returns all pids that were added to the cgroup at path and to
+// all its sub-cgroups.
+func GetAllPids(path string) ([]int, error) {
+ var pids []int
+ // collect pids from all sub-cgroups
+ err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
+ dir, file := filepath.Split(p)
+ if file != "cgroup.procs" {
+ return nil
+ }
+ if iErr != nil {
+ return iErr
+ }
+ cPids, err := readProcsFile(dir)
+ if err != nil {
+ return err
+ }
+ pids = append(pids, cPids...)
+ return nil
+ })
+ return pids, err
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go
new file mode 100644
index 0000000..c7bdf1f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go
@@ -0,0 +1,10 @@
+// +build linux,!go1.5
+
+package libcontainer
+
+import "syscall"
+
+// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building
+// with earlier versions
+func enableSetgroups(sys *syscall.SysProcAttr) {
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
new file mode 100644
index 0000000..e0f3ca1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
@@ -0,0 +1,61 @@
+package configs
+
+import "fmt"
+
+// blockIODevice holds the major:minor format supported in the blkio cgroup
+type blockIODevice struct {
+ // Major is the device's major number
+ Major int64 `json:"major"`
+ // Minor is the device's minor number
+ Minor int64 `json:"minor"`
+}
+
+// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair
+type WeightDevice struct {
+ blockIODevice
+ // Weight is the bandwidth rate for the device, range is from 10 to 1000
+ Weight uint16 `json:"weight"`
+ // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ LeafWeight uint16 `json:"leafWeight"`
+}
+
+// NewWeightDevice returns a configured WeightDevice pointer
+func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice {
+ wd := &WeightDevice{}
+ wd.Major = major
+ wd.Minor = minor
+ wd.Weight = weight
+ wd.LeafWeight = leafWeight
+ return wd
+}
+
+// WeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) WeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)
+}
+
+// LeafWeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) LeafWeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)
+}
+
+// ThrottleDevice struct holds a `major:minor rate_per_second` pair
+type ThrottleDevice struct {
+ blockIODevice
+ // Rate is the IO rate limit per cgroup per device
+ Rate uint64 `json:"rate"`
+}
+
+// NewThrottleDevice returns a configured ThrottleDevice pointer
+func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice {
+ td := &ThrottleDevice{}
+ td.Major = major
+ td.Minor = minor
+ td.Rate = rate
+ return td
+}
+
+// String formats the struct to be writable to the cgroup specific file
+func (td *ThrottleDevice) String() string {
+ return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)
+}
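+
+// Illustrative output: NewWeightDevice(8, 0, 500, 300).WeightString() renders
+// "8:0 500" and NewThrottleDevice(8, 0, 1048576).String() renders
+// "8:0 1048576", the "major:minor value" line format the blkio cgroup files
+// accept.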
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
new file mode 100644
index 0000000..c186d28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
@@ -0,0 +1,111 @@
+// +build linux freebsd
+
+package configs
+
+type FreezerState string
+
+const (
+ Undefined FreezerState = ""
+ Frozen FreezerState = "FROZEN"
+ Thawed FreezerState = "THAWED"
+)
+
+type Cgroup struct {
+ Name string `json:"name"`
+
+ // name of parent cgroup or slice
+ Parent string `json:"parent"`
+
+	// ScopePrefix describes the prefix for the scope name
+ ScopePrefix string `json:"scope_prefix"`
+
+ // Paths represent the cgroups paths to join
+ Paths map[string]string
+
+ // Resources contains various cgroups settings to apply
+ *Resources
+}
+
+type Resources struct {
+ // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
+ AllowAllDevices bool `json:"allow_all_devices"`
+
+ AllowedDevices []*Device `json:"allowed_devices"`
+
+ DeniedDevices []*Device `json:"denied_devices"`
+
+ // Memory limit (in bytes)
+ Memory int64 `json:"memory"`
+
+ // Memory reservation or soft_limit (in bytes)
+ MemoryReservation int64 `json:"memory_reservation"`
+
+ // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwap int64 `json:"memory_swap"`
+
+ // Kernel memory limit (in bytes)
+ KernelMemory int64 `json:"kernel_memory"`
+
+ // CPU shares (relative weight vs. other containers)
+ CpuShares int64 `json:"cpu_shares"`
+
+ // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+ CpuQuota int64 `json:"cpu_quota"`
+
+ // CPU period to be used for hardcapping (in usecs). 0 to use system default.
+ CpuPeriod int64 `json:"cpu_period"`
+
+	// How much time the CPU may spend in realtime scheduling (in usecs).
+	CpuRtRuntime int64 `json:"cpu_rt_runtime"`
+
+	// CPU period to be used for realtime scheduling (in usecs).
+	CpuRtPeriod int64 `json:"cpu_rt_period"`
+
+ // CPU to use
+ CpusetCpus string `json:"cpuset_cpus"`
+
+ // MEM to use
+ CpusetMems string `json:"cpuset_mems"`
+
+	// Process limit; set <= 0 to disable the limit.
+ PidsLimit int64 `json:"pids_limit"`
+
+ // Specifies per cgroup weight, range is from 10 to 1000.
+ BlkioWeight uint16 `json:"blkio_weight"`
+
+ // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
+
+ // Weight per cgroup per device, can override BlkioWeight.
+ BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
+
+ // IO read rate limit per cgroup per device, bytes per second.
+ BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
+
+	// IO write rate limit per cgroup per device, bytes per second.
+ BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
+
+ // IO read rate limit per cgroup per device, IO per second.
+ BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
+
+ // IO write rate limit per cgroup per device, IO per second.
+ BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"`
+
+ // set the freeze value for the process
+ Freezer FreezerState `json:"freezer"`
+
+ // Hugetlb limit (in bytes)
+ HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"`
+
+ // Whether to disable OOM Killer
+ OomKillDisable bool `json:"oom_kill_disable"`
+
+ // Tuning swappiness behaviour per cgroup
+ MemorySwappiness int64 `json:"memory_swappiness"`
+
+ // Set priority of network traffic for container
+ NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
+
+ // Set class identifier for container's network packets
+ NetClsClassid string `json:"net_cls_classid"`
+}
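+
+// Illustrative sketch of a minimal cgroup configuration (all values are
+// arbitrary):
+//
+//	cg := &Cgroup{
+//		Name:   "mycontainer",
+//		Parent: "system",
+//		Resources: &Resources{
+//			Memory:    512 * 1024 * 1024, // 512 MiB hard limit
+//			CpuShares: 1024,              // default relative weight
+//		},
+//	}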
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
new file mode 100644
index 0000000..95e2830
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
@@ -0,0 +1,6 @@
+// +build !windows,!linux,!freebsd
+
+package configs
+
+type Cgroup struct {
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
new file mode 100644
index 0000000..d74847b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
@@ -0,0 +1,6 @@
+package configs
+
+// TODO Windows: This can ultimately be entirely factored out on Windows as
+// cgroups are a Unix-specific construct.
+type Cgroup struct {
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/config.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/config.go
new file mode 100644
index 0000000..069daae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -0,0 +1,252 @@
+package configs
+
+import (
+ "bytes"
+ "encoding/json"
+ "os/exec"
+)
+
+type Rlimit struct {
+ Type int `json:"type"`
+ Hard uint64 `json:"hard"`
+ Soft uint64 `json:"soft"`
+}
+
+// IDMap represents UID/GID Mappings for User Namespaces.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+// Seccomp represents syscall restrictions
+// By default, only the native architecture of the kernel is allowed to be used
+// for syscalls. Additional architectures can be added by specifying them in
+// Architectures.
+type Seccomp struct {
+ DefaultAction Action `json:"default_action"`
+ Architectures []string `json:"architectures"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// An action to be taken upon rule match in Seccomp
+type Action int
+
+const (
+ Kill Action = iota + 1
+ Errno
+ Trap
+ Allow
+ Trace
+)
+
+// A comparison operator to be used when matching syscall arguments in Seccomp
+type Operator int
+
+const (
+ EqualTo Operator = iota + 1
+ NotEqualTo
+ GreaterThan
+ GreaterThanOrEqualTo
+ LessThan
+ LessThanOrEqualTo
+ MaskEqualTo
+)
+
+// A rule to match a specific syscall argument in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"value_two"`
+ Op Operator `json:"op"`
+}
+
+// A rule to match a syscall in Seccomp
+type Syscall struct {
+ Name string `json:"name"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+}
+
+// TODO Windows. Many of these fields should be factored out into those parts
+// which are common across platforms, and those which are platform specific.
+
+// Config defines configuration options for executing a process inside a contained environment.
+type Config struct {
+ // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
+ // This is a common option when the container is running in ramdisk
+ NoPivotRoot bool `json:"no_pivot_root"`
+
+ // ParentDeathSignal specifies the signal that is sent to the container's process in the case
+ // that the parent process dies.
+ ParentDeathSignal int `json:"parent_death_signal"`
+
+ // PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set.
+ // When a custom PivotDir not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable.
+ // This is required when using read only root filesystems. In these cases, a read/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot.
+ PivotDir string `json:"pivot_dir"`
+
+ // Path to a directory containing the container's root filesystem.
+ Rootfs string `json:"rootfs"`
+
+ // Readonlyfs will remount the container's rootfs as readonly where only externally mounted
+	// bind mounts are writable.
+ Readonlyfs bool `json:"readonlyfs"`
+
+ // Specifies the mount propagation flags to be applied to /.
+ RootPropagation int `json:"rootPropagation"`
+
+ // Mounts specify additional source and destination paths that will be mounted inside the container's
+ // rootfs and mount namespace if specified
+ Mounts []*Mount `json:"mounts"`
+
+ // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
+ Devices []*Device `json:"devices"`
+
+ MountLabel string `json:"mount_label"`
+
+ // Hostname optionally sets the container's hostname if provided
+ Hostname string `json:"hostname"`
+
+ // Namespaces specifies the container's namespaces that it should setup when cloning the init process
+ // If a namespace is not provided that namespace is shared from the container's parent process
+ Namespaces Namespaces `json:"namespaces"`
+
+	// Capabilities specify the capabilities to keep when executing the process inside the container.
+	// All capabilities not specified will be dropped from the process's capability mask.
+ Capabilities []string `json:"capabilities"`
+
+ // Networks specifies the container's network setup to be created
+ Networks []*Network `json:"networks"`
+
+ // Routes can be specified to create entries in the route table as the container is started
+ Routes []*Route `json:"routes"`
+
+ // Cgroups specifies specific cgroup settings for the various subsystems that the container is
+ // placed into to limit the resources the container has available
+ Cgroups *Cgroup `json:"cgroups"`
+
+ // AppArmorProfile specifies the profile to apply to the process running in the container and is
+	// changed at the time the process is execed
+ AppArmorProfile string `json:"apparmor_profile"`
+
+ // ProcessLabel specifies the label to apply to the process running in the container. It is
+ // commonly used by selinux
+ ProcessLabel string `json:"process_label"`
+
+ // Rlimits specifies the resource limits, such as max open files, to set in the container
+ // If Rlimits are not set, the container will inherit rlimits from the parent process
+ Rlimits []Rlimit `json:"rlimits"`
+
+ // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
+	// for a process. Valid values are in the range [-1000, 1000], where processes with
+ // higher scores are preferred for being killed.
+ // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
+ OomScoreAdj int `json:"oom_score_adj"`
+
+ // AdditionalGroups specifies the gids that should be added to supplementary groups
+ // in addition to those that the user belongs to.
+ AdditionalGroups []string `json:"additional_groups"`
+
+ // UidMappings is an array of User ID mappings for User Namespaces
+ UidMappings []IDMap `json:"uid_mappings"`
+
+ // GidMappings is an array of Group ID mappings for User Namespaces
+ GidMappings []IDMap `json:"gid_mappings"`
+
+ // MaskPaths specifies paths within the container's rootfs to mask over with a bind
+ // mount pointing to /dev/null as to prevent reads of the file.
+ MaskPaths []string `json:"mask_paths"`
+
+ // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
+ // so that these files prevent any writes.
+ ReadonlyPaths []string `json:"readonly_paths"`
+
+ // Sysctl is a map of properties and their values. It is the equivalent of using
+ // sysctl -w my.property.name value in Linux.
+ Sysctl map[string]string `json:"sysctl"`
+
+ // Seccomp allows actions to be taken whenever a syscall is made within the container.
+ // A number of rules are given, each having an action to be taken if a syscall matches it.
+ // A default action to be taken if no rules match is also given.
+ Seccomp *Seccomp `json:"seccomp"`
+
+ // Hooks are a collection of actions to perform at various container lifecycle events.
+	// Hooks cannot be marshaled to JSON, but they also do not need to be.
+ Hooks *Hooks `json:"-"`
+
+ // Version is the version of opencontainer specification that is supported.
+ Version string `json:"version"`
+}
+
+type Hooks struct {
+ // Prestart commands are executed after the container namespaces are created,
+ // but before the user supplied command is executed from init.
+ Prestart []Hook
+
+ // Poststart commands are executed after the container init process starts.
+ Poststart []Hook
+
+ // Poststop commands are executed after the container init process exits.
+ Poststop []Hook
+}
+
+// HookState is the payload provided to a hook on execution.
+type HookState struct {
+ Version string `json:"version"`
+ ID string `json:"id"`
+ Pid int `json:"pid"`
+ Root string `json:"root"`
+}
+
+type Hook interface {
+ // Run executes the hook with the provided state.
+ Run(HookState) error
+}
+
+// NewFunctionHook returns a FuncHook that calls the provided function when the hook is run.
+func NewFunctionHook(f func(HookState) error) FuncHook {
+ return FuncHook{
+ run: f,
+ }
+}
+
+type FuncHook struct {
+ run func(HookState) error
+}
+
+func (f FuncHook) Run(s HookState) error {
+ return f.run(s)
+}
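+
+// Usage sketch (illustrative; "config" stands for a *Config being built):
+//
+//	hook := NewFunctionHook(func(s HookState) error {
+//		// s carries the container's version, id, pid and root
+//		return nil
+//	})
+//	config.Hooks = &Hooks{Prestart: []Hook{hook}}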
+
+type Command struct {
+ Path string `json:"path"`
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Dir string `json:"dir"`
+}
+
+// NewCommandHook returns a CommandHook that executes the provided command when the hook is run.
+func NewCommandHook(cmd Command) CommandHook {
+ return CommandHook{
+ Command: cmd,
+ }
+}
+
+type CommandHook struct {
+ Command
+}
+
+func (c Command) Run(s HookState) error {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ cmd := exec.Cmd{
+ Path: c.Path,
+ Args: c.Args,
+ Env: c.Env,
+ Stdin: bytes.NewReader(b),
+ }
+ return cmd.Run()
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/config_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/config_unix.go
new file mode 100644
index 0000000..c447f3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/config_unix.go
@@ -0,0 +1,51 @@
+// +build freebsd linux
+
+package configs
+
+import "fmt"
+
+// HostUID gets the root uid for the process on the host, which may be
+// non-zero when user namespaces are enabled.
+func (c Config) HostUID() (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.UidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no user mappings found.")
+ }
+ id, found := c.hostIDFromMapping(0, c.UidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no root user mapping found.")
+ }
+ return id, nil
+ }
+ // Return default root uid 0
+ return 0, nil
+}
+
+// HostGID gets the root gid for the process on the host, which may be
+// non-zero when user namespaces are enabled.
+func (c Config) HostGID() (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.GidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.")
+ }
+ id, found := c.hostIDFromMapping(0, c.GidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no root group mapping found.")
+ }
+ return id, nil
+ }
+ // Return default root gid 0
+ return 0, nil
+}
+
+// Utility function that gets a host ID for a container ID from the user namespace map
+// if that ID is present in the map.
+func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) {
+ for _, m := range uMap {
+ if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (containerID - m.ContainerID)
+ return hostID, true
+ }
+ }
+ return -1, false
+}
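+
+// Worked example (illustrative): with a single mapping
+// {ContainerID: 0, HostID: 100000, Size: 65536}, container IDs 0..65535 are
+// covered, and hostIDFromMapping(0, ...) returns 100000 + (0 - 0) = 100000,
+// so root inside the container appears as uid 100000 on the host.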
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/device.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/device.go
new file mode 100644
index 0000000..a52a024
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/device.go
@@ -0,0 +1,54 @@
+package configs
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ Wildcard = -1
+)
+
+// TODO Windows: This can be factored out in the future
+
+type Device struct {
+ // Device type, block, char, etc.
+ Type rune `json:"type"`
+
+ // Path to the device.
+ Path string `json:"path"`
+
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+
+ // Cgroup permissions format, rwm.
+ Permissions string `json:"permissions"`
+
+ // FileMode permission bits for the device.
+ FileMode os.FileMode `json:"file_mode"`
+
+ // Uid of the device.
+ Uid uint32 `json:"uid"`
+
+ // Gid of the device.
+ Gid uint32 `json:"gid"`
+}
+
+func (d *Device) CgroupString() string {
+ return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
+}
+
+func (d *Device) Mkdev() int {
+ return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
+}
+
+// deviceNumberString converts the device number to its string representation.
+func deviceNumberString(number int64) string {
+ if number == Wildcard {
+ return "*"
+ }
+ return fmt.Sprint(number)
+}
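+
+// Illustrative output: for /dev/null (a char device with major 1, minor 3 and
+// "rwm" permissions), CgroupString returns "c 1:3 rwm", the line format
+// written to the devices cgroup; a Wildcard major or minor renders as "*".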
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
new file mode 100644
index 0000000..e452992
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
@@ -0,0 +1,125 @@
+// +build linux freebsd
+
+package configs
+
+var (
+ // These are devices that are to be both allowed and created.
+ DefaultSimpleDevices = []*Device{
+ // /dev/null and zero
+ {
+ Path: "/dev/null",
+ Type: 'c',
+ Major: 1,
+ Minor: 3,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/zero",
+ Type: 'c',
+ Major: 1,
+ Minor: 5,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ {
+ Path: "/dev/full",
+ Type: 'c',
+ Major: 1,
+ Minor: 7,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // consoles and ttys
+ {
+ Path: "/dev/tty",
+ Type: 'c',
+ Major: 5,
+ Minor: 0,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+
+ // /dev/urandom,/dev/random
+ {
+ Path: "/dev/urandom",
+ Type: 'c',
+ Major: 1,
+ Minor: 9,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ {
+ Path: "/dev/random",
+ Type: 'c',
+ Major: 1,
+ Minor: 8,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ }
+ DefaultAllowedDevices = append([]*Device{
+ // allow mknod for any device
+ {
+ Type: 'c',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+ {
+ Type: 'b',
+ Major: Wildcard,
+ Minor: Wildcard,
+ Permissions: "m",
+ },
+
+ {
+ Path: "/dev/console",
+ Type: 'c',
+ Major: 5,
+ Minor: 1,
+ Permissions: "rwm",
+ },
+ // /dev/pts/ - pts namespaces are "coming soon"
+ {
+ Path: "",
+ Type: 'c',
+ Major: 136,
+ Minor: Wildcard,
+ Permissions: "rwm",
+ },
+ {
+ Path: "",
+ Type: 'c',
+ Major: 5,
+ Minor: 2,
+ Permissions: "rwm",
+ },
+
+ // tuntap
+ {
+ Path: "",
+ Type: 'c',
+ Major: 10,
+ Minor: 200,
+ Permissions: "rwm",
+ },
+ }, DefaultSimpleDevices...)
+ DefaultAutoCreatedDevices = append([]*Device{
+ {
+ // /dev/fuse is created but not allowed.
+			// This is to allow java to work, because java
+			// insists on there being a /dev/fuse
+ // https://github.com/docker/docker/issues/514
+ // https://github.com/docker/docker/issues/2393
+ //
+ Path: "/dev/fuse",
+ Type: 'c',
+ Major: 10,
+ Minor: 229,
+ Permissions: "rwm",
+ },
+ }, DefaultSimpleDevices...)
+)
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
new file mode 100644
index 0000000..d302163
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
@@ -0,0 +1,9 @@
+package configs
+
+type HugepageLimit struct {
+ // which type of hugepage to limit.
+ Pagesize string `json:"page_size"`
+
+ // usage limit for hugepage.
+ Limit uint64 `json:"limit"`
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
new file mode 100644
index 0000000..9a0395e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
@@ -0,0 +1,14 @@
+package configs
+
+import (
+ "fmt"
+)
+
+type IfPrioMap struct {
+ Interface string `json:"interface"`
+ Priority int64 `json:"priority"`
+}
+
+func (i *IfPrioMap) CgroupString() string {
+ return fmt.Sprintf("%s %d", i.Interface, i.Priority)
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/mount.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/mount.go
new file mode 100644
index 0000000..cc770c9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/mount.go
@@ -0,0 +1,30 @@
+package configs
+
+type Mount struct {
+ // Source path for the mount.
+ Source string `json:"source"`
+
+ // Destination path for the mount inside the container.
+ Destination string `json:"destination"`
+
+ // Device the mount is for.
+ Device string `json:"device"`
+
+ // Mount flags.
+ Flags int `json:"flags"`
+
+ // Propagation Flags
+ PropagationFlags []int `json:"propagation_flags"`
+
+ // Mount data applied to the mount.
+ Data string `json:"data"`
+
+ // Relabel source if set, "z" indicates shared, "Z" indicates unshared.
+ Relabel string `json:"relabel"`
+
+ // Optional Command to be run before Source is mounted.
+ PremountCmds []Command `json:"premount_cmds"`
+
+ // Optional Command to be run after Source is mounted.
+ PostmountCmds []Command `json:"postmount_cmds"`
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
new file mode 100644
index 0000000..a3329a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
@@ -0,0 +1,5 @@
+package configs
+
+type NamespaceType string
+
+type Namespaces []Namespace
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
new file mode 100644
index 0000000..c962999
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package configs
+
+import "syscall"
+
+func (n *Namespace) Syscall() int {
+ return namespaceInfo[n.Type]
+}
+
+var namespaceInfo = map[NamespaceType]int{
+ NEWNET: syscall.CLONE_NEWNET,
+ NEWNS: syscall.CLONE_NEWNS,
+ NEWUSER: syscall.CLONE_NEWUSER,
+ NEWIPC: syscall.CLONE_NEWIPC,
+ NEWUTS: syscall.CLONE_NEWUTS,
+ NEWPID: syscall.CLONE_NEWPID,
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ var flag int
+ for _, v := range *n {
+ if v.Path != "" {
+ continue
+ }
+ flag |= namespaceInfo[v.Type]
+ }
+ return uintptr(flag)
+}
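+
+// Worked example (illustrative): Namespaces entries of type NEWNET, NEWNS and
+// NEWPID with empty Paths yield CLONE_NEWNET|CLONE_NEWNS|CLONE_NEWPID; any
+// entry with a non-empty Path is skipped here because that namespace is
+// joined via setns rather than created at clone time.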
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
new file mode 100644
index 0000000..1644588
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows
+
+package configs
+
+func (n *Namespace) Syscall() int {
+	panic("No namespace syscall support")
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+	panic("No namespace syscall support")
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
new file mode 100644
index 0000000..7bc9085
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
@@ -0,0 +1,89 @@
+// +build linux freebsd
+
+package configs
+
+import "fmt"
+
+const (
+ NEWNET NamespaceType = "NEWNET"
+ NEWPID NamespaceType = "NEWPID"
+ NEWNS NamespaceType = "NEWNS"
+ NEWUTS NamespaceType = "NEWUTS"
+ NEWIPC NamespaceType = "NEWIPC"
+ NEWUSER NamespaceType = "NEWUSER"
+)
+
+func NamespaceTypes() []NamespaceType {
+ return []NamespaceType{
+ NEWNET,
+ NEWPID,
+ NEWNS,
+ NEWUTS,
+ NEWIPC,
+ NEWUSER,
+ }
+}
+
+// Namespace defines configuration for each namespace. It specifies an
+// alternate path that is able to be joined via setns.
+type Namespace struct {
+ Type NamespaceType `json:"type"`
+ Path string `json:"path"`
+}
+
+func (n *Namespace) GetPath(pid int) string {
+ if n.Path != "" {
+ return n.Path
+ }
+ return fmt.Sprintf("/proc/%d/ns/%s", pid, n.file())
+}
+
+func (n *Namespace) file() string {
+ file := ""
+ switch n.Type {
+ case NEWNET:
+ file = "net"
+ case NEWNS:
+ file = "mnt"
+ case NEWPID:
+ file = "pid"
+ case NEWIPC:
+ file = "ipc"
+ case NEWUSER:
+ file = "user"
+ case NEWUTS:
+ file = "uts"
+ }
+ return file
+}
+
+func (n *Namespaces) Remove(t NamespaceType) bool {
+ i := n.index(t)
+ if i == -1 {
+ return false
+ }
+ *n = append((*n)[:i], (*n)[i+1:]...)
+ return true
+}
+
+func (n *Namespaces) Add(t NamespaceType, path string) {
+ i := n.index(t)
+ if i == -1 {
+ *n = append(*n, Namespace{Type: t, Path: path})
+ return
+ }
+ (*n)[i].Path = path
+}
+
+func (n *Namespaces) index(t NamespaceType) int {
+ for i, ns := range *n {
+ if ns.Type == t {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n *Namespaces) Contains(t NamespaceType) bool {
+ return n.index(t) != -1
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
new file mode 100644
index 0000000..9a74033
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!freebsd
+
+package configs
+
+// Namespace defines configuration for each namespace. It specifies an
+// alternate path that is able to be joined via setns.
+type Namespace struct {
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/network.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/network.go
new file mode 100644
index 0000000..ccdb228
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/network.go
@@ -0,0 +1,72 @@
+package configs
+
+// Network defines configuration for a container's networking stack
+//
+// The network configuration can be omitted from a container, causing the
+// container to be set up with the host's networking stack.
+type Network struct {
+	// Type sets the network's type, commonly veth or loopback
+ Type string `json:"type"`
+
+ // Name of the network interface
+ Name string `json:"name"`
+
+ // The bridge to use.
+ Bridge string `json:"bridge"`
+
+ // MacAddress contains the MAC address to set on the network interface
+ MacAddress string `json:"mac_address"`
+
+ // Address contains the IPv4 and mask to set on the network interface
+ Address string `json:"address"`
+
+ // Gateway sets the gateway address that is used as the default for the interface
+ Gateway string `json:"gateway"`
+
+ // IPv6Address contains the IPv6 and mask to set on the network interface
+ IPv6Address string `json:"ipv6_address"`
+
+ // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
+ IPv6Gateway string `json:"ipv6_gateway"`
+
+ // Mtu sets the mtu value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ Mtu int `json:"mtu"`
+
+ // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ TxQueueLen int `json:"txqueuelen"`
+
+	// HostInterfaceName is the unique name of the host-side interface of the
+	// container's veth pair.
+ HostInterfaceName string `json:"host_interface_name"`
+
+ // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ // bridge port in the case of type veth
+ // Note: This is unsupported on some systems.
+ // Note: This does not apply to loopback interfaces.
+ HairpinMode bool `json:"hairpin_mode"`
+}
+
+// Routes can be specified to create entries in the route table as the container is started
+//
+// All of destination, source, and gateway should be either IPv4 or IPv6.
+// One of the three options must be present, and omitted entries will use their
+// IP family default for the route table. For IPv4, for example, setting the
+// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
+// destination of 0.0.0.0 (or *) when viewed in the route table.
+type Route struct {
+ // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Destination string `json:"destination"`
+
+ // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Source string `json:"source"`
+
+ // Sets the gateway. Accepts IPv4 and IPv6
+ Gateway string `json:"gateway"`
+
+ // The device to set this route up for, for example: eth0
+ InterfaceName string `json:"interface_name"`
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/validate/config.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/validate/config.go
new file mode 100644
index 0000000..848a67c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/configs/validate/config.go
@@ -0,0 +1,93 @@
+package validate
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Validator interface {
+ Validate(*configs.Config) error
+}
+
+func New() Validator {
+ return &ConfigValidator{}
+}
+
+type ConfigValidator struct {
+}
+
+func (v *ConfigValidator) Validate(config *configs.Config) error {
+ if err := v.rootfs(config); err != nil {
+ return err
+ }
+ if err := v.network(config); err != nil {
+ return err
+ }
+ if err := v.hostname(config); err != nil {
+ return err
+ }
+ if err := v.security(config); err != nil {
+ return err
+ }
+ if err := v.usernamespace(config); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rootfs validates that the rootfs is an absolute path and is not a symlink
+// to the container's root filesystem.
+func (v *ConfigValidator) rootfs(config *configs.Config) error {
+ cleaned, err := filepath.Abs(config.Rootfs)
+ if err != nil {
+ return err
+ }
+ if cleaned, err = filepath.EvalSymlinks(cleaned); err != nil {
+ return err
+ }
+ if config.Rootfs != cleaned {
+ return fmt.Errorf("%s is not an absolute path or is a symlink", config.Rootfs)
+ }
+ return nil
+}
+
+func (v *ConfigValidator) network(config *configs.Config) error {
+ if !config.Namespaces.Contains(configs.NEWNET) {
+ if len(config.Networks) > 0 || len(config.Routes) > 0 {
+ return fmt.Errorf("unable to apply network settings without a private NET namespace")
+ }
+ }
+ return nil
+}
+
+func (v *ConfigValidator) hostname(config *configs.Config) error {
+ if config.Hostname != "" && !config.Namespaces.Contains(configs.NEWUTS) {
+ return fmt.Errorf("unable to set hostname without a private UTS namespace")
+ }
+ return nil
+}
+
+func (v *ConfigValidator) security(config *configs.Config) error {
+ // restrict sys without mount namespace
+ if (len(config.MaskPaths) > 0 || len(config.ReadonlyPaths) > 0) &&
+ !config.Namespaces.Contains(configs.NEWNS) {
+ return fmt.Errorf("unable to restrict sys entries without a private MNT namespace")
+ }
+ return nil
+}
+
+func (v *ConfigValidator) usernamespace(config *configs.Config) error {
+ if config.Namespaces.Contains(configs.NEWUSER) {
+ if _, err := os.Stat("/proc/self/ns/user"); os.IsNotExist(err) {
+ return fmt.Errorf("USER namespaces aren't enabled in the kernel")
+ }
+ } else {
+ if config.UidMappings != nil || config.GidMappings != nil {
+ return fmt.Errorf("User namespace mappings specified, but USER namespace isn't enabled in the config")
+ }
+ }
+ return nil
+}
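+
+// Usage sketch (illustrative; "config" stands for a previously built
+// *configs.Config):
+//
+//	v := validate.New()
+//	if err := v.Validate(config); err != nil {
+//		// the config combines options that cannot work together, e.g. a
+//		// hostname set without a private UTS namespace
+//	}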
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console.go
new file mode 100644
index 0000000..042a2a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console.go
@@ -0,0 +1,15 @@
+package libcontainer
+
+import "io"
+
+// Console represents a pseudo TTY.
+type Console interface {
+ io.ReadWriter
+ io.Closer
+
+ // Path returns the filesystem path to the slave side of the pty.
+ Path() string
+
+ // Fd returns the fd for the master of the pty.
+ Fd() uintptr
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_freebsd.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_freebsd.go
new file mode 100644
index 0000000..3c89eda
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_freebsd.go
@@ -0,0 +1,13 @@
+// +build freebsd
+
+package libcontainer
+
+import (
+ "errors"
+)
+
+// NewConsole returns an initialized console that can be used within a container by copying bytes
+// from the master side to the slave that is attached as the tty for the container's init process.
+func NewConsole(uid, gid int) (Console, error) {
+ return nil, errors.New("libcontainer console is not supported on FreeBSD")
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_linux.go
new file mode 100644
index 0000000..7af771b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_linux.go
@@ -0,0 +1,145 @@
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "unsafe"
+
+ "github.com/opencontainers/runc/libcontainer/label"
+)
+
+// NewConsole returns an initialized console that can be used within a container by copying bytes
+// from the master side to the slave that is attached as the tty for the container's init process.
+func NewConsole(uid, gid int) (Console, error) {
+ master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+ console, err := ptsname(master)
+ if err != nil {
+ return nil, err
+ }
+ if err := unlockpt(master); err != nil {
+ return nil, err
+ }
+ if err := os.Chmod(console, 0600); err != nil {
+ return nil, err
+ }
+ if err := os.Chown(console, uid, gid); err != nil {
+ return nil, err
+ }
+ return &linuxConsole{
+ slavePath: console,
+ master: master,
+ }, nil
+}
+
+// newConsoleFromPath is an internal function returning an initialized console for use inside
+// a container's MNT namespace.
+func newConsoleFromPath(slavePath string) *linuxConsole {
+ return &linuxConsole{
+ slavePath: slavePath,
+ }
+}
+
+// linuxConsole is a linux pseudo TTY for use within a container.
+type linuxConsole struct {
+ master *os.File
+ slavePath string
+}
+
+func (c *linuxConsole) Fd() uintptr {
+ return c.master.Fd()
+}
+
+func (c *linuxConsole) Path() string {
+ return c.slavePath
+}
+
+func (c *linuxConsole) Read(b []byte) (int, error) {
+ return c.master.Read(b)
+}
+
+func (c *linuxConsole) Write(b []byte) (int, error) {
+ return c.master.Write(b)
+}
+
+func (c *linuxConsole) Close() error {
+ if m := c.master; m != nil {
+ return m.Close()
+ }
+ return nil
+}
+
+// mount initializes the console inside the rootfs, mounting with the specified mount label
+// and applying the correct ownership of the console.
+func (c *linuxConsole) mount(rootfs, mountLabel string) error {
+ oldMask := syscall.Umask(0000)
+ defer syscall.Umask(oldMask)
+ if err := label.SetFileLabel(c.slavePath, mountLabel); err != nil {
+ return err
+ }
+ dest := filepath.Join(rootfs, "/dev/console")
+ f, err := os.Create(dest)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ if f != nil {
+ f.Close()
+ }
+ return syscall.Mount(c.slavePath, dest, "bind", syscall.MS_BIND, "")
+}
+
+// dupStdio opens the slavePath for the console and dups the fds to the current
+// process's stdio, fd 0,1,2.
+func (c *linuxConsole) dupStdio() error {
+ slave, err := c.open(syscall.O_RDWR)
+ if err != nil {
+ return err
+ }
+ fd := int(slave.Fd())
+ for _, i := range []int{0, 1, 2} {
+ if err := syscall.Dup3(fd, i, 0); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// open is a clone of os.OpenFile without O_CLOEXEC, used to open the pty slave.
+func (c *linuxConsole) open(flag int) (*os.File, error) {
+ r, e := syscall.Open(c.slavePath, flag, 0)
+ if e != nil {
+ return nil, &os.PathError{
+ Op: "open",
+ Path: c.slavePath,
+ Err: e,
+ }
+ }
+ return os.NewFile(uintptr(r), c.slavePath), nil
+}
+
+func ioctl(fd uintptr, flag, data uintptr) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 {
+ return err
+ }
+ return nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+ var u int32
+ return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+ var n int32
+ if err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("/dev/pts/%d", n), nil
+}
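+
+// Illustrative example: if the TIOCGPTN ioctl on the master stores 3 into n,
+// the slave side of the pty is "/dev/pts/3"; unlockpt must still be called
+// before that slave path can be opened.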
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_windows.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_windows.go
new file mode 100644
index 0000000..a68c02f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/console_windows.go
@@ -0,0 +1,30 @@
+package libcontainer
+
+// NewConsole returns an initialized console that can be used within a container
+func NewConsole(uid, gid int) (Console, error) {
+ return &windowsConsole{}, nil
+}
+
+// windowsConsole is a Windows pseudo TTY for use within a container.
+type windowsConsole struct {
+}
+
+func (c *windowsConsole) Fd() uintptr {
+ return 0
+}
+
+func (c *windowsConsole) Path() string {
+ return ""
+}
+
+func (c *windowsConsole) Read(b []byte) (int, error) {
+ return 0, nil
+}
+
+func (c *windowsConsole) Write(b []byte) (int, error) {
+ return 0, nil
+}
+
+func (c *windowsConsole) Close() error {
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container.go
new file mode 100644
index 0000000..03c8c55
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container.go
@@ -0,0 +1,145 @@
+// Libcontainer provides a native Go implementation for creating containers
+// with namespaces, cgroups, capabilities, and filesystem access controls.
+// It allows you to manage the lifecycle of the container, performing additional operations
+// after the container is created.
+package libcontainer
+
+import (
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+// The status of a container.
+type Status int
+
+const (
+ // The container exists but has not been run yet
+ Created Status = iota
+
+ // The container exists and is running.
+ Running
+
+	// The container exists and is in the process of being paused.
+ Pausing
+
+ // The container exists, but all its processes are paused.
+ Paused
+
+ // The container exists, but its state is saved on disk
+ Checkpointed
+
+ // The container does not exist.
+ Destroyed
+)
+
+func (s Status) String() string {
+ switch s {
+ case Created:
+ return "created"
+ case Running:
+ return "running"
+ case Pausing:
+ return "pausing"
+ case Paused:
+ return "paused"
+ case Checkpointed:
+ return "checkpointed"
+ case Destroyed:
+ return "destroyed"
+ default:
+ return "unknown"
+ }
+}
+
+// BaseState represents the platform agnostic pieces relating to a
+// running container's state
+type BaseState struct {
+ // ID is the container ID.
+ ID string `json:"id"`
+
+ // InitProcessPid is the init process id in the parent namespace.
+ InitProcessPid int `json:"init_process_pid"`
+
+ // InitProcessStartTime is the init process start time.
+ InitProcessStartTime string `json:"init_process_start"`
+
+ // Config is the container's configuration.
+ Config configs.Config `json:"config"`
+}
+
+// A libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found. BaseContainer includes methods that are platform agnostic.
+type BaseContainer interface {
+ // Returns the ID of the container
+ ID() string
+
+ // Returns the current status of the container.
+ //
+ // errors:
+ // ContainerDestroyed - Container no longer exists,
+ // Systemerror - System error.
+ Status() (Status, error)
+
+ // State returns the current container's state information.
+ //
+ // errors:
+ // Systemerror - System error.
+ State() (*State, error)
+
+ // Returns the current config of the container.
+ Config() configs.Config
+
+ // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process.
+ //
+ // errors:
+ // ContainerDestroyed - Container no longer exists,
+ // Systemerror - System error.
+ //
+ // Some of the returned PIDs may no longer refer to processes in the Container, unless
+	// the Container state is PAUSED, in which case every PID in the slice is valid.
+ Processes() ([]int, error)
+
+ // Returns statistics for the container.
+ //
+ // errors:
+ // ContainerDestroyed - Container no longer exists,
+ // Systemerror - System error.
+ Stats() (*Stats, error)
+
+ // Set resources of container as configured
+ //
+ // We can use this to change resources when containers are running.
+ //
+ // errors:
+ // Systemerror - System error.
+ Set(config configs.Config) error
+
+ // Start a process inside the container. Returns error if process fails to
+ // start. You can track process lifecycle with passed Process structure.
+ //
+ // errors:
+ // ContainerDestroyed - Container no longer exists,
+ // ConfigInvalid - config is invalid,
+ // ContainerPaused - Container is paused,
+ // Systemerror - System error.
+ Start(process *Process) (err error)
+
+ // Destroys the container after killing all running processes.
+ //
+ // Any event registrations are removed before the container is destroyed.
+ // No error is returned if the container is already destroyed.
+ //
+ // errors:
+ // Systemerror - System error.
+ Destroy() error
+
+ // Signal sends the provided signal code to the container's initial process.
+ //
+ // errors:
+ // Systemerror - System error.
+ Signal(s os.Signal) error
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_linux.go
new file mode 100644
index 0000000..4015c95
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_linux.go
@@ -0,0 +1,1085 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/golang/protobuf/proto"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/criurpc"
+ "github.com/opencontainers/runc/libcontainer/utils"
+ "github.com/vishvananda/netlink/nl"
+)
+
+const stdioFdCount = 3
+
+type linuxContainer struct {
+ id string
+ root string
+ config *configs.Config
+ cgroupManager cgroups.Manager
+ initPath string
+ initArgs []string
+ initProcess parentProcess
+ criuPath string
+ m sync.Mutex
+ criuVersion int
+ state containerState
+}
+
+// State represents a running container's state
+type State struct {
+ BaseState
+
+ // Platform specific fields below here
+
+ // Path to all the cgroups setup for a container. Key is cgroup subsystem name
+ // with the value as the path.
+ CgroupPaths map[string]string `json:"cgroup_paths"`
+
+ // NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
+ // with the value as the path.
+ NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`
+
+ // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore
+ ExternalDescriptors []string `json:"external_descriptors,omitempty"`
+}
+
+// A libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found.
+type Container interface {
+ BaseContainer
+
+ // Methods below here are platform specific
+
+ // Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
+ //
+ // errors:
+ // Systemerror - System error.
+ Checkpoint(criuOpts *CriuOpts) error
+
+	// Restore restores the checkpointed container to a running state using the criu(8) utility.
+ //
+ // errors:
+ // Systemerror - System error.
+ Restore(process *Process, criuOpts *CriuOpts) error
+
+ // If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses
+	// the execution of any user processes. Asynchronously, when the container has finished being paused, the
+ // state is changed to PAUSED.
+ // If the Container state is PAUSED, do nothing.
+ //
+ // errors:
+ // ContainerDestroyed - Container no longer exists,
+ // Systemerror - System error.
+ Pause() error
+
+ // If the Container state is PAUSED, resumes the execution of any user processes in the
+ // Container before setting the Container state to RUNNING.
+ // If the Container state is RUNNING, do nothing.
+ //
+ // errors:
+ // ContainerDestroyed - Container no longer exists,
+ // Systemerror - System error.
+ Resume() error
+
+ // NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
+ //
+ // errors:
+ // Systemerror - System error.
+ NotifyOOM() (<-chan struct{}, error)
+
+ // NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level
+ //
+ // errors:
+ // Systemerror - System error.
+ NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
+}
+
+// ID returns the container's unique ID
+func (c *linuxContainer) ID() string {
+ return c.id
+}
+
+// Config returns the container's configuration
+func (c *linuxContainer) Config() configs.Config {
+ return *c.config
+}
+
+func (c *linuxContainer) Status() (Status, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+ return c.currentStatus()
+}
+
+func (c *linuxContainer) State() (*State, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+ return c.currentState()
+}
+
+func (c *linuxContainer) Processes() ([]int, error) {
+ pids, err := c.cgroupManager.GetAllPids()
+ if err != nil {
+ return nil, newSystemError(err)
+ }
+ return pids, nil
+}
+
+func (c *linuxContainer) Stats() (*Stats, error) {
+ var (
+ err error
+ stats = &Stats{}
+ )
+ if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil {
+ return stats, newSystemError(err)
+ }
+ for _, iface := range c.config.Networks {
+ switch iface.Type {
+ case "veth":
+ istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
+ if err != nil {
+ return stats, newSystemError(err)
+ }
+ stats.Interfaces = append(stats.Interfaces, istats)
+ }
+ }
+ return stats, nil
+}
+
+func (c *linuxContainer) Set(config configs.Config) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ c.config = &config
+ return c.cgroupManager.Set(c.config)
+}
+
+func (c *linuxContainer) Start(process *Process) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ status, err := c.currentStatus()
+ if err != nil {
+ return err
+ }
+ doInit := status == Destroyed
+ parent, err := c.newParentProcess(process, doInit)
+ if err != nil {
+ return newSystemError(err)
+ }
+ if err := parent.start(); err != nil {
+		// terminate the process to ensure that it is properly reaped.
+ if err := parent.terminate(); err != nil {
+ logrus.Warn(err)
+ }
+ return newSystemError(err)
+ }
+ c.state = &runningState{
+ c: c,
+ }
+ if doInit {
+ if err := c.updateState(parent); err != nil {
+ return err
+ }
+ if c.config.Hooks != nil {
+ s := configs.HookState{
+ Version: c.config.Version,
+ ID: c.id,
+ Pid: parent.pid(),
+ Root: c.config.Rootfs,
+ }
+ for _, hook := range c.config.Hooks.Poststart {
+ if err := hook.Run(s); err != nil {
+ if err := parent.terminate(); err != nil {
+ logrus.Warn(err)
+ }
+ return newSystemError(err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (c *linuxContainer) Signal(s os.Signal) error {
+ if err := c.initProcess.signal(s); err != nil {
+ return newSystemError(err)
+ }
+ return nil
+}
+
+func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) {
+ parentPipe, childPipe, err := newPipe()
+ if err != nil {
+ return nil, newSystemError(err)
+ }
+ cmd, err := c.commandTemplate(p, childPipe)
+ if err != nil {
+ return nil, newSystemError(err)
+ }
+ if !doInit {
+ return c.newSetnsProcess(p, cmd, parentPipe, childPipe)
+ }
+ return c.newInitProcess(p, cmd, parentPipe, childPipe)
+}
+
+func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) {
+ cmd := &exec.Cmd{
+ Path: c.initPath,
+ Args: c.initArgs,
+ }
+ cmd.Stdin = p.Stdin
+ cmd.Stdout = p.Stdout
+ cmd.Stderr = p.Stderr
+ cmd.Dir = c.config.Rootfs
+ if cmd.SysProcAttr == nil {
+ cmd.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ cmd.ExtraFiles = append(p.ExtraFiles, childPipe)
+ cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
+	// NOTE: when running a container with no PID namespace and the parent process spawning the container is
+	// PID1, the kernel delivers the pdeathsig to the container's init process for some reason,
+	// even though the parent is still running.
+ if c.config.ParentDeathSignal > 0 {
+ cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal)
+ }
+ return cmd, nil
+}
+
+func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*initProcess, error) {
+ t := "_LIBCONTAINER_INITTYPE=" + string(initStandard)
+ cloneFlags := c.config.Namespaces.CloneFlags()
+ if cloneFlags&syscall.CLONE_NEWUSER != 0 {
+ if err := c.addUidGidMappings(cmd.SysProcAttr); err != nil {
+ // user mappings are not supported
+ return nil, err
+ }
+ enableSetgroups(cmd.SysProcAttr)
+ // Default to root user when user namespaces are enabled.
+ if cmd.SysProcAttr.Credential == nil {
+ cmd.SysProcAttr.Credential = &syscall.Credential{}
+ }
+ }
+ cmd.Env = append(cmd.Env, t)
+ cmd.SysProcAttr.Cloneflags = cloneFlags
+ return &initProcess{
+ cmd: cmd,
+ childPipe: childPipe,
+ parentPipe: parentPipe,
+ manager: c.cgroupManager,
+ config: c.newInitConfig(p),
+ container: c,
+ process: p,
+ }, nil
+}
+
+func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) {
+ cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
+	// for a setns process, we don't have to set cloneflags, as the process's namespaces
+	// will only be set via the setns syscall
+ data, err := c.bootstrapData(0, c.initProcess.pid(), p.consolePath)
+ if err != nil {
+ return nil, err
+ }
+ // TODO: set on container for process management
+ return &setnsProcess{
+ cmd: cmd,
+ cgroupPaths: c.cgroupManager.GetPaths(),
+ childPipe: childPipe,
+ parentPipe: parentPipe,
+ config: c.newInitConfig(p),
+ process: p,
+ bootstrapData: data,
+ }, nil
+}
+
+func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
+ return &initConfig{
+ Config: c.config,
+ Args: process.Args,
+ Env: process.Env,
+ User: process.User,
+ Cwd: process.Cwd,
+ Console: process.consolePath,
+ Capabilities: process.Capabilities,
+ PassedFilesCount: len(process.ExtraFiles),
+ }
+}
+
+func newPipe() (parent *os.File, child *os.File, err error) {
+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
+}
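+
+// Minimal usage sketch for newPipe (an assumption drawn from the surrounding
+// code, not upstream documentation). A SOCK_STREAM socketpair is used rather
+// than os.Pipe so that both ends can read and write, and SOCK_CLOEXEC keeps
+// the descriptors from leaking into other children:
+//
+//	parent, child, err := newPipe()
+//	if err != nil {
+//		// handle error
+//	}
+//	// hand child to the new process via cmd.ExtraFiles, then exchange
+//	// bootstrap data and sync messages over parent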
+
+func (c *linuxContainer) Destroy() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ return c.state.destroy()
+}
+
+func (c *linuxContainer) Pause() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ status, err := c.currentStatus()
+ if err != nil {
+ return err
+ }
+ if status != Running {
+ return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning)
+ }
+ if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
+ return err
+ }
+ return c.state.transition(&pausedState{
+ c: c,
+ })
+}
+
+func (c *linuxContainer) Resume() error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ status, err := c.currentStatus()
+ if err != nil {
+ return err
+ }
+ if status != Paused {
+ return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused)
+ }
+ if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
+ return err
+ }
+ return c.state.transition(&runningState{
+ c: c,
+ })
+}
+
+func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
+ return notifyOnOOM(c.cgroupManager.GetPaths())
+}
+
+func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
+ return notifyMemoryPressure(c.cgroupManager.GetPaths(), level)
+}
+
+// XXX debug support, remove when debugging done.
+func addArgsFromEnv(evar string, args *[]string) {
+ if e := os.Getenv(evar); e != "" {
+ for _, f := range strings.Fields(e) {
+ *args = append(*args, f)
+ }
+ }
+ fmt.Printf(">>> criu %v\n", *args)
+}
+
+// checkCriuVersion checks that the installed CRIU version is greater than or
+// equal to minVersion, e.g. "1.5.2".
+func (c *linuxContainer) checkCriuVersion(minVersion string) error {
+ var x, y, z, versionReq int
+
+ // A partial parse (e.g. "1.7") is fine: Sscanf fills x and y before
+ // reporting an error, and z stays 0.
+ _, err := fmt.Sscanf(minVersion, "%d.%d.%d\n", &x, &y, &z) // 1.5.2
+ if err != nil {
+ _, err = fmt.Sscanf(minVersion, "Version: %d.%d\n", &x, &y) // 1.6
+ }
+ versionReq = x*10000 + y*100 + z
+
+ out, err := exec.Command(c.criuPath, "-V").Output()
+ if err != nil {
+ return fmt.Errorf("Unable to execute CRIU command: %s", c.criuPath)
+ }
+
+ x = 0
+ y = 0
+ z = 0
+ if ep := strings.Index(string(out), "-"); ep >= 0 {
+ // criu Git version format
+ var version string
+ if sp := strings.Index(string(out), "GitID"); sp > 0 {
+ version = string(out)[sp:ep]
+ } else {
+ return fmt.Errorf("Unable to parse the CRIU version: %s", c.criuPath)
+ }
+
+ n, err := fmt.Sscanf(string(version), "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2
+ if err != nil {
+ n, err = fmt.Sscanf(string(version), "GitID: v%d.%d", &x, &y) // 1.6
+ y++
+ } else {
+ z++
+ }
+ if n < 2 || err != nil {
+ return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err)
+ }
+ } else {
+ // criu release version format
+ n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2
+ if err != nil {
+ n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6
+ }
+ if n < 2 || err != nil {
+ return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err)
+ }
+ }
+
+ c.criuVersion = x*10000 + y*100 + z
+
+ if c.criuVersion < versionReq {
+ return fmt.Errorf("CRIU version must be %s or higher", min_version)
+ }
+
+ return nil
+}
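+
+// Worked example of the encoding used above: versions are packed as
+// x*10000 + y*100 + z, so "1.5.2" becomes 10502 and "1.7" becomes 10700,
+// reducing the minimum-version check to a single integer comparison.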
+
+const descriptorsFilename = "descriptors.json"
+
+func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) {
+ mountDest := m.Destination
+ if strings.HasPrefix(mountDest, c.config.Rootfs) {
+ mountDest = mountDest[len(c.config.Rootfs):]
+ }
+
+ extMnt := &criurpc.ExtMountMap{
+ Key: proto.String(mountDest),
+ Val: proto.String(mountDest),
+ }
+ req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
+}
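+
+// Illustration (path invented): for a bind mount whose destination is
+// <rootfs>/etc/hosts, the dump map records {Key: "/etc/hosts",
+// Val: "/etc/hosts"}, i.e. the path as seen from inside the container; the
+// restore counterpart below maps the same key back to the host source.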
+
+func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if err := c.checkCriuVersion("1.5.2"); err != nil {
+ return err
+ }
+
+ if criuOpts.ImagesDirectory == "" {
+ return fmt.Errorf("invalid directory to save checkpoint")
+ }
+
+ // Since a container can be C/R'ed multiple times,
+ // the checkpoint directory may already exist.
+ if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ if criuOpts.WorkDirectory == "" {
+ criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
+ }
+
+ if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ workDir, err := os.Open(criuOpts.WorkDirectory)
+ if err != nil {
+ return err
+ }
+ defer workDir.Close()
+
+ imageDir, err := os.Open(criuOpts.ImagesDirectory)
+ if err != nil {
+ return err
+ }
+ defer imageDir.Close()
+
+ rpcOpts := criurpc.CriuOpts{
+ ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
+ WorkDirFd: proto.Int32(int32(workDir.Fd())),
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("dump.log"),
+ Root: proto.String(c.config.Rootfs),
+ ManageCgroups: proto.Bool(true),
+ NotifyScripts: proto.Bool(true),
+ Pid: proto.Int32(int32(c.initProcess.pid())),
+ ShellJob: proto.Bool(criuOpts.ShellJob),
+ LeaveRunning: proto.Bool(criuOpts.LeaveRunning),
+ TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
+ ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
+ FileLocks: proto.Bool(criuOpts.FileLocks),
+ }
+
+ // append optional criu opts, e.g., page-server and port
+ if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 {
+ rpcOpts.Ps = &criurpc.CriuPageServerInfo{
+ Address: proto.String(criuOpts.PageServer.Address),
+ Port: proto.Int32(criuOpts.PageServer.Port),
+ }
+ }
+
+ // append optional manage cgroups mode
+ if criuOpts.ManageCgroupsMode != 0 {
+ if err := c.checkCriuVersion("1.7"); err != nil {
+ return err
+ }
+ rpcOpts.ManageCgroupsMode = proto.Uint32(uint32(criuOpts.ManageCgroupsMode))
+ }
+
+ t := criurpc.CriuReqType_DUMP
+ req := &criurpc.CriuReq{
+ Type: &t,
+ Opts: &rpcOpts,
+ }
+
+ for _, m := range c.config.Mounts {
+ switch m.Device {
+ case "bind":
+ c.addCriuDumpMount(req, m)
+ case "cgroup":
+ binds, err := getCgroupMounts(m)
+ if err != nil {
+ return err
+ }
+ for _, b := range binds {
+ c.addCriuDumpMount(req, b)
+ }
+ }
+ }
+
+ // Write the FD info to a file in the image directory
+
+ fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors())
+ if err != nil {
+ return err
+ }
+
+ err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0644)
+ if err != nil {
+ return err
+ }
+
+ err = c.criuSwrk(nil, req, criuOpts, false)
+ if err != nil {
+ return err
+ }
+ return nil
+}
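+
+// Hypothetical caller sketch (directory names invented for illustration):
+//
+//	opts := &libcontainer.CriuOpts{
+//		ImagesDirectory: "/var/lib/demo/images",
+//		LeaveRunning:    true,
+//	}
+//	if err := container.Checkpoint(opts); err != nil {
+//		// handle error; dump.log is written to opts.WorkDirectory
+//	}
+//
+// WorkDirectory defaults to <container root>/criu.work when left empty.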
+
+func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) {
+ mountDest := m.Destination
+ if strings.HasPrefix(mountDest, c.config.Rootfs) {
+ mountDest = mountDest[len(c.config.Rootfs):]
+ }
+
+ extMnt := &criurpc.ExtMountMap{
+ Key: proto.String(mountDest),
+ Val: proto.String(m.Source),
+ }
+ req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
+}
+
+func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ if err := c.checkCriuVersion("1.5.2"); err != nil {
+ return err
+ }
+ if criuOpts.WorkDirectory == "" {
+ criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
+ }
+ // Since a container can be C/R'ed multiple times,
+ // the work directory may already exist.
+ if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+ workDir, err := os.Open(criuOpts.WorkDirectory)
+ if err != nil {
+ return err
+ }
+ defer workDir.Close()
+ if criuOpts.ImagesDirectory == "" {
+ return fmt.Errorf("invalid directory to restore checkpoint")
+ }
+ imageDir, err := os.Open(criuOpts.ImagesDirectory)
+ if err != nil {
+ return err
+ }
+ defer imageDir.Close()
+ // CRIU has a few requirements for a root directory:
+ // * it must be a mount point
+ // * its parent must not be overmounted
+ // c.config.Rootfs is bind-mounted to a temporary directory
+ // to satisfy these requirements.
+ root := filepath.Join(c.root, "criu-root")
+ if err := os.Mkdir(root, 0755); err != nil {
+ return err
+ }
+ defer os.Remove(root)
+ root, err = filepath.EvalSymlinks(root)
+ if err != nil {
+ return err
+ }
+ err = syscall.Mount(c.config.Rootfs, root, "", syscall.MS_BIND|syscall.MS_REC, "")
+ if err != nil {
+ return err
+ }
+ defer syscall.Unmount(root, syscall.MNT_DETACH)
+ t := criurpc.CriuReqType_RESTORE
+ req := &criurpc.CriuReq{
+ Type: &t,
+ Opts: &criurpc.CriuOpts{
+ ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
+ WorkDirFd: proto.Int32(int32(workDir.Fd())),
+ EvasiveDevices: proto.Bool(true),
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("restore.log"),
+ RstSibling: proto.Bool(true),
+ Root: proto.String(root),
+ ManageCgroups: proto.Bool(true),
+ NotifyScripts: proto.Bool(true),
+ ShellJob: proto.Bool(criuOpts.ShellJob),
+ ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
+ TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
+ FileLocks: proto.Bool(criuOpts.FileLocks),
+ },
+ }
+
+ for _, m := range c.config.Mounts {
+ switch m.Device {
+ case "bind":
+ c.addCriuRestoreMount(req, m)
+ case "cgroup":
+ binds, err := getCgroupMounts(m)
+ if err != nil {
+ return err
+ }
+ for _, b := range binds {
+ c.addCriuRestoreMount(req, b)
+ }
+ }
+ }
+ for _, iface := range c.config.Networks {
+ switch iface.Type {
+ case "veth":
+ veth := new(criurpc.CriuVethPair)
+ veth.IfOut = proto.String(iface.HostInterfaceName)
+ veth.IfIn = proto.String(iface.Name)
+ req.Opts.Veths = append(req.Opts.Veths, veth)
+ case "loopback":
+ }
+ }
+ for _, i := range criuOpts.VethPairs {
+ veth := new(criurpc.CriuVethPair)
+ veth.IfOut = proto.String(i.HostInterfaceName)
+ veth.IfIn = proto.String(i.ContainerInterfaceName)
+ req.Opts.Veths = append(req.Opts.Veths, veth)
+ }
+
+ // append optional manage cgroups mode
+ if criuOpts.ManageCgroupsMode != 0 {
+ if err := c.checkCriuVersion("1.7"); err != nil {
+ return err
+ }
+ req.Opts.ManageCgroupsMode = proto.Uint32(uint32(criuOpts.ManageCgroupsMode))
+ }
+
+ var (
+ fds []string
+ fdJSON []byte
+ )
+ if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil {
+ return err
+ }
+
+ if err := json.Unmarshal(fdJSON, &fds); err != nil {
+ return err
+ }
+ for i := range fds {
+ if s := fds[i]; strings.Contains(s, "pipe:") {
+ inheritFd := new(criurpc.InheritFd)
+ inheritFd.Key = proto.String(s)
+ inheritFd.Fd = proto.Int32(int32(i))
+ req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
+ }
+ }
+ return c.criuSwrk(process, req, criuOpts, true)
+}
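+
+// Restore-side note on descriptors.json: every checkpointed descriptor whose
+// entry contains "pipe:" is forwarded to CRIU as an inherit_fd mapping, so
+// the restored process reattaches to the pipes of the new *Process instead
+// of the long-gone originals.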
+
+func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
+ if err := c.cgroupManager.Apply(pid); err != nil {
+ return err
+ }
+
+ path := fmt.Sprintf("/proc/%d/cgroup", pid)
+ cgroupsPaths, err := cgroups.ParseCgroupFile(path)
+ if err != nil {
+ return err
+ }
+
+ for c, p := range cgroupsPaths {
+ cgroupRoot := &criurpc.CgroupRoot{
+ Ctrl: proto.String(c),
+ Path: proto.String(p),
+ }
+ req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot)
+ }
+
+ return nil
+}
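+
+// Illustrative /proc/<pid>/cgroup input for the parsing above (controller
+// names and paths vary by system):
+//
+//	4:memory:/mycontainer
+//	3:cpu,cpuacct:/mycontainer
+//
+// Each controller/path pair becomes one CgroupRoot entry on the request.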
+
+func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool) error {
+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return err
+ }
+
+ logPath := filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile())
+ criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client")
+ criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server")
+ defer criuClient.Close()
+ defer criuServer.Close()
+
+ args := []string{"swrk", "3"}
+ logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath)
+ logrus.Debugf("Using CRIU with following args: %s", args)
+ cmd := exec.Command(c.criuPath, args...)
+ if process != nil {
+ cmd.Stdin = process.Stdin
+ cmd.Stdout = process.Stdout
+ cmd.Stderr = process.Stderr
+ }
+ cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer)
+
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ criuServer.Close()
+
+ defer func() {
+ criuClient.Close()
+ _, err := cmd.Process.Wait()
+ if err != nil {
+ return
+ }
+ }()
+
+ if applyCgroups {
+ err := c.criuApplyCgroups(cmd.Process.Pid, req)
+ if err != nil {
+ return err
+ }
+ }
+
+ var extFds []string
+ if process != nil {
+ extFds, err = getPipeFds(cmd.Process.Pid)
+ if err != nil {
+ return err
+ }
+ }
+
+ logrus.Debugf("Using CRIU in %s mode", req.GetType().String())
+ val := reflect.ValueOf(req.GetOpts())
+ v := reflect.Indirect(val)
+ for i := 0; i < v.NumField(); i++ {
+ st := v.Type()
+ name := st.Field(i).Name
+ if strings.HasPrefix(name, "XXX_") {
+ continue
+ }
+ value := val.MethodByName("Get" + name).Call([]reflect.Value{})
+ logrus.Debugf("CRIU option %s with value %v", name, value[0])
+ }
+ data, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+ _, err = criuClient.Write(data)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, 10*4096)
+ for {
+ n, err := criuClient.Read(buf)
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return fmt.Errorf("unexpected EOF")
+ }
+ if n == len(buf) {
+ return fmt.Errorf("buffer is too small")
+ }
+
+ resp := new(criurpc.CriuResp)
+ err = proto.Unmarshal(buf[:n], resp)
+ if err != nil {
+ return err
+ }
+ if !resp.GetSuccess() {
+ typeString := req.GetType().String()
+ return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath)
+ }
+
+ t := resp.GetType()
+ switch {
+ case t == criurpc.CriuReqType_NOTIFY:
+ if err := c.criuNotifications(resp, process, opts, extFds); err != nil {
+ return err
+ }
+ t = criurpc.CriuReqType_NOTIFY
+ req = &criurpc.CriuReq{
+ Type: &t,
+ NotifySuccess: proto.Bool(true),
+ }
+ data, err = proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+ n, err = criuClient.Write(data)
+ if err != nil {
+ return err
+ }
+ continue
+ case t == criurpc.CriuReqType_RESTORE:
+ case t == criurpc.CriuReqType_DUMP:
+ break
+ default:
+ return fmt.Errorf("unable to parse the response %s", resp.String())
+ }
+
+ break
+ }
+
+ // cmd.Wait() also waits for the goroutines cmd uses to proxy file
+ // descriptors; here we want to wait only for the CRIU process itself.
+ st, err := cmd.Process.Wait()
+ if err != nil {
+ return err
+ }
+ if !st.Success() {
+ return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath)
+ }
+ return nil
+}
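+
+// Protocol recap for criuSwrk (a summary of the code above, not upstream
+// documentation): "swrk 3" runs CRIU in self-sufficient worker mode with
+// fd 3 -- the first ExtraFiles slot, i.e. criuServer -- as its RPC socket.
+// The loop then exchanges protobuf-encoded CriuReq/CriuResp messages,
+// acknowledging each NOTIFY callback until the final DUMP or RESTORE
+// response arrives.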
+
+// block any external network activity
+func lockNetwork(config *configs.Config) error {
+ for _, config := range config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+
+ if err := strategy.detach(config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func unlockNetwork(config *configs.Config) error {
+ for _, config := range config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+ if err = strategy.attach(config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string) error {
+ notify := resp.GetNotify()
+ if notify == nil {
+ return fmt.Errorf("invalid response: %s", resp.String())
+ }
+ switch {
+ case notify.GetScript() == "post-dump":
+ f, err := os.Create(filepath.Join(c.root, "checkpoint"))
+ if err != nil {
+ return err
+ }
+ f.Close()
+ case notify.GetScript() == "network-unlock":
+ if err := unlockNetwork(c.config); err != nil {
+ return err
+ }
+ case notify.GetScript() == "network-lock":
+ if err := lockNetwork(c.config); err != nil {
+ return err
+ }
+ case notify.GetScript() == "post-restore":
+ pid := notify.GetPid()
+ r, err := newRestoredProcess(int(pid), fds)
+ if err != nil {
+ return err
+ }
+ process.ops = r
+ if err := c.state.transition(&restoredState{
+ imageDir: opts.ImagesDirectory,
+ c: c,
+ }); err != nil {
+ return err
+ }
+ if err := c.updateState(r); err != nil {
+ return err
+ }
+ if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil {
+ if !os.IsNotExist(err) {
+ logrus.Error(err)
+ }
+ }
+ }
+ return nil
+}
+
+func (c *linuxContainer) updateState(process parentProcess) error {
+ c.initProcess = process
+ state, err := c.currentState()
+ if err != nil {
+ return err
+ }
+ return c.saveState(state)
+}
+
+func (c *linuxContainer) saveState(s *State) error {
+ f, err := os.Create(filepath.Join(c.root, stateFilename))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return utils.WriteJSON(f, s)
+}
+
+func (c *linuxContainer) deleteState() error {
+ return os.Remove(filepath.Join(c.root, stateFilename))
+}
+
+func (c *linuxContainer) currentStatus() (Status, error) {
+ if err := c.refreshState(); err != nil {
+ return -1, err
+ }
+ return c.state.status(), nil
+}
+
+// refreshState needs to be called to verify that the container's current
+// state matches what is actually true. Because consumers of libcontainer can
+// use it out of process, we need to verify the container's status based on
+// runtime information rather than relying on our in-process view.
+func (c *linuxContainer) refreshState() error {
+ paused, err := c.isPaused()
+ if err != nil {
+ return err
+ }
+ if paused {
+ return c.state.transition(&pausedState{c: c})
+ }
+ running, err := c.isRunning()
+ if err != nil {
+ return err
+ }
+ if running {
+ return c.state.transition(&runningState{c: c})
+ }
+ return c.state.transition(&stoppedState{c: c})
+}
+
+func (c *linuxContainer) isRunning() (bool, error) {
+ if c.initProcess == nil {
+ return false, nil
+ }
+ // return Running if the init process is alive
+ if err := syscall.Kill(c.initProcess.pid(), 0); err != nil {
+ if err == syscall.ESRCH {
+ return false, nil
+ }
+ return false, newSystemError(err)
+ }
+ return true, nil
+}
+
+func (c *linuxContainer) isPaused() (bool, error) {
+ data, err := ioutil.ReadFile(filepath.Join(c.cgroupManager.GetPaths()["freezer"], "freezer.state"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, newSystemError(err)
+ }
+ return bytes.Equal(bytes.TrimSpace(data), []byte("FROZEN")), nil
+}
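+
+// The freezer.state file read above holds THAWED, FREEZING or FROZEN plus a
+// trailing newline (hence the TrimSpace); only a fully FROZEN cgroup is
+// reported as paused.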
+
+func (c *linuxContainer) currentState() (*State, error) {
+ var (
+ startTime string
+ externalDescriptors []string
+ pid = -1
+ )
+ if c.initProcess != nil {
+ pid = c.initProcess.pid()
+ startTime, _ = c.initProcess.startTime()
+ externalDescriptors = c.initProcess.externalDescriptors()
+ }
+ state := &State{
+ BaseState: BaseState{
+ ID: c.ID(),
+ Config: *c.config,
+ InitProcessPid: pid,
+ InitProcessStartTime: startTime,
+ },
+ CgroupPaths: c.cgroupManager.GetPaths(),
+ NamespacePaths: make(map[configs.NamespaceType]string),
+ ExternalDescriptors: externalDescriptors,
+ }
+ if pid > 0 {
+ for _, ns := range c.config.Namespaces {
+ state.NamespacePaths[ns.Type] = ns.GetPath(pid)
+ }
+ for _, nsType := range configs.NamespaceTypes() {
+ if _, ok := state.NamespacePaths[nsType]; !ok {
+ ns := configs.Namespace{Type: nsType}
+ state.NamespacePaths[ns.Type] = ns.GetPath(pid)
+ }
+ }
+ }
+ return state, nil
+}
+
+// bootstrapData encodes the necessary data in netlink binary format as an
+// io.Reader. Consumers can write the data to a bootstrap program such as one
+// that uses the nsenter package to bootstrap the container's init process
+// correctly, i.e. with the correct namespaces, uid/gid mappings etc.
+func (c *linuxContainer) bootstrapData(cloneFlags uintptr, pid int, consolePath string) (io.Reader, error) {
+ // create the netlink message
+ r := nl.NewNetlinkRequest(int(InitMsg), 0)
+ // write pid
+ r.AddData(&Int32msg{
+ Type: PidAttr,
+ Value: uint32(pid),
+ })
+ // write console path
+ if consolePath != "" {
+ r.AddData(&Bytemsg{
+ Type: ConsolePathAttr,
+ Value: []byte(consolePath),
+ })
+ }
+ return bytes.NewReader(r.Serialize()), nil
+}
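+
+// Plausible consumption sketch (an assumption based on how the setns process
+// uses its bootstrapData field elsewhere in this package): the reader is
+// simply copied into the bootstrap pipe, e.g.
+//
+//	data, err := c.bootstrapData(0, c.initProcess.pid(), p.consolePath)
+//	if err != nil {
+//		// handle error
+//	}
+//	io.Copy(parentPipe, data)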
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_nouserns_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_nouserns_linux.go
new file mode 100644
index 0000000..3b75d59
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_nouserns_linux.go
@@ -0,0 +1,13 @@
+// +build !go1.4
+
+package libcontainer
+
+import (
+ "fmt"
+ "syscall"
+)
+
+// not available before Go 1.4
+func (c *linuxContainer) addUidGidMappings(sys *syscall.SysProcAttr) error {
+ return fmt.Errorf("User namespaces are not supported in Go < 1.4")
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_userns_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_userns_linux.go
new file mode 100644
index 0000000..5f4cf3c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_userns_linux.go
@@ -0,0 +1,26 @@
+// +build go1.4
+
+package libcontainer
+
+import "syscall"
+
+// addUidGidMappings converts the configured IDMap entries to SysProcIDMap slices and adds them to SysProcAttr.
+func (c *linuxContainer) addUidGidMappings(sys *syscall.SysProcAttr) error {
+ if c.config.UidMappings != nil {
+ sys.UidMappings = make([]syscall.SysProcIDMap, len(c.config.UidMappings))
+ for i, um := range c.config.UidMappings {
+ sys.UidMappings[i].ContainerID = um.ContainerID
+ sys.UidMappings[i].HostID = um.HostID
+ sys.UidMappings[i].Size = um.Size
+ }
+ }
+ if c.config.GidMappings != nil {
+ sys.GidMappings = make([]syscall.SysProcIDMap, len(c.config.GidMappings))
+ for i, gm := range c.config.GidMappings {
+ sys.GidMappings[i].ContainerID = gm.ContainerID
+ sys.GidMappings[i].HostID = gm.HostID
+ sys.GidMappings[i].Size = gm.Size
+ }
+ }
+ return nil
+}
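+
+// Worked example (values invented for illustration): a config with
+// UidMappings = [{ContainerID: 0, HostID: 100000, Size: 65536}] maps root
+// inside the container to uid 100000 on the host, with the following 65535
+// container uids mapped consecutively after it; GidMappings behave the same
+// way for gids.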
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_windows.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_windows.go
new file mode 100644
index 0000000..bb84ff7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/container_windows.go
@@ -0,0 +1,20 @@
+package libcontainer
+
+// State represents a running container's state
+type State struct {
+ BaseState
+
+ // Platform specific fields below here
+}
+
+// A libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found.
+type Container interface {
+ BaseContainer
+
+ // Methods below here are platform specific
+
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go
new file mode 100644
index 0000000..a2a816b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go
@@ -0,0 +1,36 @@
+// +build linux freebsd
+
+package libcontainer
+
+// cgroup restore strategy provided by criu
+type cg_mode uint32
+
+const (
+ CRIU_CG_MODE_SOFT cg_mode = 3 + iota // restore cgroup properties only if the directory was created by criu
+ CRIU_CG_MODE_FULL // always restore all cgroups and their properties
+ CRIU_CG_MODE_STRICT // restore all, requiring them to not be present in the system
+ CRIU_CG_MODE_DEFAULT // the same as CRIU_CG_MODE_SOFT
+)
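+
+// Note on the numbering above: the 3 + iota offset is presumably chosen to
+// match CRIU's own CG_MODE enum, in which the soft mode sits at value 3; the
+// value is forwarded verbatim through the manage_cgroups_mode RPC option.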
+
+type CriuPageServerInfo struct {
+ Address string // IP address of CRIU page server
+ Port int32 // port number of CRIU page server
+}
+
+type VethPairName struct {
+ ContainerInterfaceName string
+ HostInterfaceName string
+}
+
+type CriuOpts struct {
+ ImagesDirectory string // directory for storing image files
+ WorkDirectory string // directory to cd into and write logs/pidfiles/stats to
+ LeaveRunning bool // leave the container in a running state after checkpoint
+ TcpEstablished bool // checkpoint/restore established TCP connections
+ ExternalUnixConnections bool // allow external unix connections
+ ShellJob bool // allow dumping and restoring shell jobs
+ FileLocks bool // handle file locks, for safety
+ PageServer CriuPageServerInfo // allow dumping to a criu page server
+ VethPairs []VethPairName // veth pairs to pass to criu on restore
+ ManageCgroupsMode cg_mode // dump or restore cgroup mode
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
new file mode 100644
index 0000000..bc92077
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
@@ -0,0 +1,6 @@
+package libcontainer
+
+// TODO Windows: This can ultimately be entirely factored out as criu is
+// a Unix concept not relevant on Windows.
+type CriuOpts struct {
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/Makefile b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/Makefile
new file mode 100644
index 0000000..3e5346a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/Makefile
@@ -0,0 +1,2 @@
+gen: criurpc.proto
+ protoc --go_out=. criurpc.proto
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go
new file mode 100644
index 0000000..193b6df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go
@@ -0,0 +1,689 @@
+// Code generated by protoc-gen-go.
+// source: criurpc.proto
+// DO NOT EDIT!
+
+/*
+Package criurpc is a generated protocol buffer package.
+
+It is generated from these files:
+ criurpc.proto
+
+It has these top-level messages:
+ CriuPageServerInfo
+ CriuVethPair
+ ExtMountMap
+ InheritFd
+ CgroupRoot
+ UnixSk
+ CriuOpts
+ CriuDumpResp
+ CriuRestoreResp
+ CriuNotify
+ CriuReq
+ CriuResp
+*/
+package criurpc
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type CriuReqType int32
+
+const (
+ CriuReqType_EMPTY CriuReqType = 0
+ CriuReqType_DUMP CriuReqType = 1
+ CriuReqType_RESTORE CriuReqType = 2
+ CriuReqType_CHECK CriuReqType = 3
+ CriuReqType_PRE_DUMP CriuReqType = 4
+ CriuReqType_PAGE_SERVER CriuReqType = 5
+ CriuReqType_NOTIFY CriuReqType = 6
+ CriuReqType_CPUINFO_DUMP CriuReqType = 7
+ CriuReqType_CPUINFO_CHECK CriuReqType = 8
+)
+
+var CriuReqType_name = map[int32]string{
+ 0: "EMPTY",
+ 1: "DUMP",
+ 2: "RESTORE",
+ 3: "CHECK",
+ 4: "PRE_DUMP",
+ 5: "PAGE_SERVER",
+ 6: "NOTIFY",
+ 7: "CPUINFO_DUMP",
+ 8: "CPUINFO_CHECK",
+}
+var CriuReqType_value = map[string]int32{
+ "EMPTY": 0,
+ "DUMP": 1,
+ "RESTORE": 2,
+ "CHECK": 3,
+ "PRE_DUMP": 4,
+ "PAGE_SERVER": 5,
+ "NOTIFY": 6,
+ "CPUINFO_DUMP": 7,
+ "CPUINFO_CHECK": 8,
+}
+
+func (x CriuReqType) Enum() *CriuReqType {
+ p := new(CriuReqType)
+ *p = x
+ return p
+}
+func (x CriuReqType) String() string {
+ return proto.EnumName(CriuReqType_name, int32(x))
+}
+func (x *CriuReqType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CriuReqType_value, data, "CriuReqType")
+ if err != nil {
+ return err
+ }
+ *x = CriuReqType(value)
+ return nil
+}
+
+type CriuPageServerInfo struct {
+ Address *string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
+ Pid *int32 `protobuf:"varint,3,opt,name=pid" json:"pid,omitempty"`
+ Fd *int32 `protobuf:"varint,4,opt,name=fd" json:"fd,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuPageServerInfo) Reset() { *m = CriuPageServerInfo{} }
+func (m *CriuPageServerInfo) String() string { return proto.CompactTextString(m) }
+func (*CriuPageServerInfo) ProtoMessage() {}
+
+func (m *CriuPageServerInfo) GetAddress() string {
+ if m != nil && m.Address != nil {
+ return *m.Address
+ }
+ return ""
+}
+
+func (m *CriuPageServerInfo) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return 0
+}
+
+func (m *CriuPageServerInfo) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+func (m *CriuPageServerInfo) GetFd() int32 {
+ if m != nil && m.Fd != nil {
+ return *m.Fd
+ }
+ return 0
+}
+
+type CriuVethPair struct {
+ IfIn *string `protobuf:"bytes,1,req,name=if_in" json:"if_in,omitempty"`
+ IfOut *string `protobuf:"bytes,2,req,name=if_out" json:"if_out,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuVethPair) Reset() { *m = CriuVethPair{} }
+func (m *CriuVethPair) String() string { return proto.CompactTextString(m) }
+func (*CriuVethPair) ProtoMessage() {}
+
+func (m *CriuVethPair) GetIfIn() string {
+ if m != nil && m.IfIn != nil {
+ return *m.IfIn
+ }
+ return ""
+}
+
+func (m *CriuVethPair) GetIfOut() string {
+ if m != nil && m.IfOut != nil {
+ return *m.IfOut
+ }
+ return ""
+}
+
+type ExtMountMap struct {
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Val *string `protobuf:"bytes,2,req,name=val" json:"val,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ExtMountMap) Reset() { *m = ExtMountMap{} }
+func (m *ExtMountMap) String() string { return proto.CompactTextString(m) }
+func (*ExtMountMap) ProtoMessage() {}
+
+func (m *ExtMountMap) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *ExtMountMap) GetVal() string {
+ if m != nil && m.Val != nil {
+ return *m.Val
+ }
+ return ""
+}
+
+type InheritFd struct {
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Fd *int32 `protobuf:"varint,2,req,name=fd" json:"fd,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InheritFd) Reset() { *m = InheritFd{} }
+func (m *InheritFd) String() string { return proto.CompactTextString(m) }
+func (*InheritFd) ProtoMessage() {}
+
+func (m *InheritFd) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *InheritFd) GetFd() int32 {
+ if m != nil && m.Fd != nil {
+ return *m.Fd
+ }
+ return 0
+}
+
+type CgroupRoot struct {
+ Ctrl *string `protobuf:"bytes,1,opt,name=ctrl" json:"ctrl,omitempty"`
+ Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CgroupRoot) Reset() { *m = CgroupRoot{} }
+func (m *CgroupRoot) String() string { return proto.CompactTextString(m) }
+func (*CgroupRoot) ProtoMessage() {}
+
+func (m *CgroupRoot) GetCtrl() string {
+ if m != nil && m.Ctrl != nil {
+ return *m.Ctrl
+ }
+ return ""
+}
+
+func (m *CgroupRoot) GetPath() string {
+ if m != nil && m.Path != nil {
+ return *m.Path
+ }
+ return ""
+}
+
+type UnixSk struct {
+ Inode *uint32 `protobuf:"varint,1,req,name=inode" json:"inode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UnixSk) Reset() { *m = UnixSk{} }
+func (m *UnixSk) String() string { return proto.CompactTextString(m) }
+func (*UnixSk) ProtoMessage() {}
+
+func (m *UnixSk) GetInode() uint32 {
+ if m != nil && m.Inode != nil {
+ return *m.Inode
+ }
+ return 0
+}
+
+type CriuOpts struct {
+ ImagesDirFd *int32 `protobuf:"varint,1,req,name=images_dir_fd" json:"images_dir_fd,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+ LeaveRunning *bool `protobuf:"varint,3,opt,name=leave_running" json:"leave_running,omitempty"`
+ ExtUnixSk *bool `protobuf:"varint,4,opt,name=ext_unix_sk" json:"ext_unix_sk,omitempty"`
+ TcpEstablished *bool `protobuf:"varint,5,opt,name=tcp_established" json:"tcp_established,omitempty"`
+ EvasiveDevices *bool `protobuf:"varint,6,opt,name=evasive_devices" json:"evasive_devices,omitempty"`
+ ShellJob *bool `protobuf:"varint,7,opt,name=shell_job" json:"shell_job,omitempty"`
+ FileLocks *bool `protobuf:"varint,8,opt,name=file_locks" json:"file_locks,omitempty"`
+ LogLevel *int32 `protobuf:"varint,9,opt,name=log_level,def=2" json:"log_level,omitempty"`
+ LogFile *string `protobuf:"bytes,10,opt,name=log_file" json:"log_file,omitempty"`
+ Ps *CriuPageServerInfo `protobuf:"bytes,11,opt,name=ps" json:"ps,omitempty"`
+ NotifyScripts *bool `protobuf:"varint,12,opt,name=notify_scripts" json:"notify_scripts,omitempty"`
+ Root *string `protobuf:"bytes,13,opt,name=root" json:"root,omitempty"`
+ ParentImg *string `protobuf:"bytes,14,opt,name=parent_img" json:"parent_img,omitempty"`
+ TrackMem *bool `protobuf:"varint,15,opt,name=track_mem" json:"track_mem,omitempty"`
+ AutoDedup *bool `protobuf:"varint,16,opt,name=auto_dedup" json:"auto_dedup,omitempty"`
+ WorkDirFd *int32 `protobuf:"varint,17,opt,name=work_dir_fd" json:"work_dir_fd,omitempty"`
+ LinkRemap *bool `protobuf:"varint,18,opt,name=link_remap" json:"link_remap,omitempty"`
+ Veths []*CriuVethPair `protobuf:"bytes,19,rep,name=veths" json:"veths,omitempty"`
+ CpuCap *uint32 `protobuf:"varint,20,opt,name=cpu_cap,def=4294967295" json:"cpu_cap,omitempty"`
+ ForceIrmap *bool `protobuf:"varint,21,opt,name=force_irmap" json:"force_irmap,omitempty"`
+ ExecCmd []string `protobuf:"bytes,22,rep,name=exec_cmd" json:"exec_cmd,omitempty"`
+ ExtMnt []*ExtMountMap `protobuf:"bytes,23,rep,name=ext_mnt" json:"ext_mnt,omitempty"`
+ ManageCgroups *bool `protobuf:"varint,24,opt,name=manage_cgroups" json:"manage_cgroups,omitempty"`
+ CgRoot []*CgroupRoot `protobuf:"bytes,25,rep,name=cg_root" json:"cg_root,omitempty"`
+ RstSibling *bool `protobuf:"varint,26,opt,name=rst_sibling" json:"rst_sibling,omitempty"`
+ InheritFd []*InheritFd `protobuf:"bytes,27,rep,name=inherit_fd" json:"inherit_fd,omitempty"`
+ AutoExtMnt *bool `protobuf:"varint,28,opt,name=auto_ext_mnt" json:"auto_ext_mnt,omitempty"`
+ ExtSharing *bool `protobuf:"varint,29,opt,name=ext_sharing" json:"ext_sharing,omitempty"`
+ ExtMasters *bool `protobuf:"varint,30,opt,name=ext_masters" json:"ext_masters,omitempty"`
+ SkipMnt []string `protobuf:"bytes,31,rep,name=skip_mnt" json:"skip_mnt,omitempty"`
+ EnableFs []string `protobuf:"bytes,32,rep,name=enable_fs" json:"enable_fs,omitempty"`
+ UnixSkIno []*UnixSk `protobuf:"bytes,33,rep,name=unix_sk_ino" json:"unix_sk_ino,omitempty"`
+ ManageCgroupsMode *uint32 `protobuf:"varint,34,opt,name=manage_cgroups_mode" json:"manage_cgroups_mode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuOpts) Reset() { *m = CriuOpts{} }
+func (m *CriuOpts) String() string { return proto.CompactTextString(m) }
+func (*CriuOpts) ProtoMessage() {}
+
+const Default_CriuOpts_LogLevel int32 = 2
+const Default_CriuOpts_CpuCap uint32 = 4294967295
+
+func (m *CriuOpts) GetImagesDirFd() int32 {
+ if m != nil && m.ImagesDirFd != nil {
+ return *m.ImagesDirFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetLeaveRunning() bool {
+ if m != nil && m.LeaveRunning != nil {
+ return *m.LeaveRunning
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtUnixSk() bool {
+ if m != nil && m.ExtUnixSk != nil {
+ return *m.ExtUnixSk
+ }
+ return false
+}
+
+func (m *CriuOpts) GetTcpEstablished() bool {
+ if m != nil && m.TcpEstablished != nil {
+ return *m.TcpEstablished
+ }
+ return false
+}
+
+func (m *CriuOpts) GetEvasiveDevices() bool {
+ if m != nil && m.EvasiveDevices != nil {
+ return *m.EvasiveDevices
+ }
+ return false
+}
+
+func (m *CriuOpts) GetShellJob() bool {
+ if m != nil && m.ShellJob != nil {
+ return *m.ShellJob
+ }
+ return false
+}
+
+func (m *CriuOpts) GetFileLocks() bool {
+ if m != nil && m.FileLocks != nil {
+ return *m.FileLocks
+ }
+ return false
+}
+
+func (m *CriuOpts) GetLogLevel() int32 {
+ if m != nil && m.LogLevel != nil {
+ return *m.LogLevel
+ }
+ return Default_CriuOpts_LogLevel
+}
+
+func (m *CriuOpts) GetLogFile() string {
+ if m != nil && m.LogFile != nil {
+ return *m.LogFile
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetPs() *CriuPageServerInfo {
+ if m != nil {
+ return m.Ps
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetNotifyScripts() bool {
+ if m != nil && m.NotifyScripts != nil {
+ return *m.NotifyScripts
+ }
+ return false
+}
+
+func (m *CriuOpts) GetRoot() string {
+ if m != nil && m.Root != nil {
+ return *m.Root
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetParentImg() string {
+ if m != nil && m.ParentImg != nil {
+ return *m.ParentImg
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetTrackMem() bool {
+ if m != nil && m.TrackMem != nil {
+ return *m.TrackMem
+ }
+ return false
+}
+
+func (m *CriuOpts) GetAutoDedup() bool {
+ if m != nil && m.AutoDedup != nil {
+ return *m.AutoDedup
+ }
+ return false
+}
+
+func (m *CriuOpts) GetWorkDirFd() int32 {
+ if m != nil && m.WorkDirFd != nil {
+ return *m.WorkDirFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetLinkRemap() bool {
+ if m != nil && m.LinkRemap != nil {
+ return *m.LinkRemap
+ }
+ return false
+}
+
+func (m *CriuOpts) GetVeths() []*CriuVethPair {
+ if m != nil {
+ return m.Veths
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetCpuCap() uint32 {
+ if m != nil && m.CpuCap != nil {
+ return *m.CpuCap
+ }
+ return Default_CriuOpts_CpuCap
+}
+
+func (m *CriuOpts) GetForceIrmap() bool {
+ if m != nil && m.ForceIrmap != nil {
+ return *m.ForceIrmap
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExecCmd() []string {
+ if m != nil {
+ return m.ExecCmd
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetExtMnt() []*ExtMountMap {
+ if m != nil {
+ return m.ExtMnt
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetManageCgroups() bool {
+ if m != nil && m.ManageCgroups != nil {
+ return *m.ManageCgroups
+ }
+ return false
+}
+
+func (m *CriuOpts) GetCgRoot() []*CgroupRoot {
+ if m != nil {
+ return m.CgRoot
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetRstSibling() bool {
+ if m != nil && m.RstSibling != nil {
+ return *m.RstSibling
+ }
+ return false
+}
+
+func (m *CriuOpts) GetInheritFd() []*InheritFd {
+ if m != nil {
+ return m.InheritFd
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetAutoExtMnt() bool {
+ if m != nil && m.AutoExtMnt != nil {
+ return *m.AutoExtMnt
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtSharing() bool {
+ if m != nil && m.ExtSharing != nil {
+ return *m.ExtSharing
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtMasters() bool {
+ if m != nil && m.ExtMasters != nil {
+ return *m.ExtMasters
+ }
+ return false
+}
+
+func (m *CriuOpts) GetSkipMnt() []string {
+ if m != nil {
+ return m.SkipMnt
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetEnableFs() []string {
+ if m != nil {
+ return m.EnableFs
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetUnixSkIno() []*UnixSk {
+ if m != nil {
+ return m.UnixSkIno
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetManageCgroupsMode() uint32 {
+ if m != nil && m.ManageCgroupsMode != nil {
+ return *m.ManageCgroupsMode
+ }
+ return 0
+}
+
+type CriuDumpResp struct {
+ Restored *bool `protobuf:"varint,1,opt,name=restored" json:"restored,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuDumpResp) Reset() { *m = CriuDumpResp{} }
+func (m *CriuDumpResp) String() string { return proto.CompactTextString(m) }
+func (*CriuDumpResp) ProtoMessage() {}
+
+func (m *CriuDumpResp) GetRestored() bool {
+ if m != nil && m.Restored != nil {
+ return *m.Restored
+ }
+ return false
+}
+
+type CriuRestoreResp struct {
+ Pid *int32 `protobuf:"varint,1,req,name=pid" json:"pid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuRestoreResp) Reset() { *m = CriuRestoreResp{} }
+func (m *CriuRestoreResp) String() string { return proto.CompactTextString(m) }
+func (*CriuRestoreResp) ProtoMessage() {}
+
+func (m *CriuRestoreResp) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+type CriuNotify struct {
+ Script *string `protobuf:"bytes,1,opt,name=script" json:"script,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuNotify) Reset() { *m = CriuNotify{} }
+func (m *CriuNotify) String() string { return proto.CompactTextString(m) }
+func (*CriuNotify) ProtoMessage() {}
+
+func (m *CriuNotify) GetScript() string {
+ if m != nil && m.Script != nil {
+ return *m.Script
+ }
+ return ""
+}
+
+func (m *CriuNotify) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+type CriuReq struct {
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Opts *CriuOpts `protobuf:"bytes,2,opt,name=opts" json:"opts,omitempty"`
+ NotifySuccess *bool `protobuf:"varint,3,opt,name=notify_success" json:"notify_success,omitempty"`
+ //
+ // When set service won't close the connection but
+ // will wait for more req-s to appear. Works not
+ // for all request types.
+ KeepOpen *bool `protobuf:"varint,4,opt,name=keep_open" json:"keep_open,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuReq) Reset() { *m = CriuReq{} }
+func (m *CriuReq) String() string { return proto.CompactTextString(m) }
+func (*CriuReq) ProtoMessage() {}
+
+func (m *CriuReq) GetType() CriuReqType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (m *CriuReq) GetOpts() *CriuOpts {
+ if m != nil {
+ return m.Opts
+ }
+ return nil
+}
+
+func (m *CriuReq) GetNotifySuccess() bool {
+ if m != nil && m.NotifySuccess != nil {
+ return *m.NotifySuccess
+ }
+ return false
+}
+
+func (m *CriuReq) GetKeepOpen() bool {
+ if m != nil && m.KeepOpen != nil {
+ return *m.KeepOpen
+ }
+ return false
+}
+
+type CriuResp struct {
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Success *bool `protobuf:"varint,2,req,name=success" json:"success,omitempty"`
+ Dump *CriuDumpResp `protobuf:"bytes,3,opt,name=dump" json:"dump,omitempty"`
+ Restore *CriuRestoreResp `protobuf:"bytes,4,opt,name=restore" json:"restore,omitempty"`
+ Notify *CriuNotify `protobuf:"bytes,5,opt,name=notify" json:"notify,omitempty"`
+ Ps *CriuPageServerInfo `protobuf:"bytes,6,opt,name=ps" json:"ps,omitempty"`
+ CrErrno *int32 `protobuf:"varint,7,opt,name=cr_errno" json:"cr_errno,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuResp) Reset() { *m = CriuResp{} }
+func (m *CriuResp) String() string { return proto.CompactTextString(m) }
+func (*CriuResp) ProtoMessage() {}
+
+func (m *CriuResp) GetType() CriuReqType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (m *CriuResp) GetSuccess() bool {
+ if m != nil && m.Success != nil {
+ return *m.Success
+ }
+ return false
+}
+
+func (m *CriuResp) GetDump() *CriuDumpResp {
+ if m != nil {
+ return m.Dump
+ }
+ return nil
+}
+
+func (m *CriuResp) GetRestore() *CriuRestoreResp {
+ if m != nil {
+ return m.Restore
+ }
+ return nil
+}
+
+func (m *CriuResp) GetNotify() *CriuNotify {
+ if m != nil {
+ return m.Notify
+ }
+ return nil
+}
+
+func (m *CriuResp) GetPs() *CriuPageServerInfo {
+ if m != nil {
+ return m.Ps
+ }
+ return nil
+}
+
+func (m *CriuResp) GetCrErrno() int32 {
+ if m != nil && m.CrErrno != nil {
+ return *m.CrErrno
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("CriuReqType", CriuReqType_name, CriuReqType_value)
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto
new file mode 100644
index 0000000..f49325e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto
@@ -0,0 +1,142 @@
+message criu_page_server_info {
+ optional string address = 1;
+ optional int32 port = 2;
+ optional int32 pid = 3;
+ optional int32 fd = 4;
+}
+
+message criu_veth_pair {
+ required string if_in = 1;
+ required string if_out = 2;
+};
+
+message ext_mount_map {
+ required string key = 1;
+ required string val = 2;
+};
+
+message inherit_fd {
+ required string key = 1;
+ required int32 fd = 2;
+};
+
+message cgroup_root {
+ optional string ctrl = 1;
+ required string path = 2;
+};
+
+message unix_sk {
+ required uint32 inode = 1;
+};
+
+message criu_opts {
+ required int32 images_dir_fd = 1;
+ optional int32 pid = 2; /* if not set on dump, will dump requesting process */
+
+ optional bool leave_running = 3;
+ optional bool ext_unix_sk = 4;
+ optional bool tcp_established = 5;
+ optional bool evasive_devices = 6;
+ optional bool shell_job = 7;
+ optional bool file_locks = 8;
+ optional int32 log_level = 9 [default = 2];
+ optional string log_file = 10; /* No subdirs are allowed. Consider using work-dir */
+
+ optional criu_page_server_info ps = 11;
+
+ optional bool notify_scripts = 12;
+
+ optional string root = 13;
+ optional string parent_img = 14;
+ optional bool track_mem = 15;
+ optional bool auto_dedup = 16;
+
+ optional int32 work_dir_fd = 17;
+ optional bool link_remap = 18;
+ repeated criu_veth_pair veths = 19;
+
+ optional uint32 cpu_cap = 20 [default = 0xffffffff];
+ optional bool force_irmap = 21;
+ repeated string exec_cmd = 22;
+
+ repeated ext_mount_map ext_mnt = 23;
+ optional bool manage_cgroups = 24; /* backward compatibility */
+ repeated cgroup_root cg_root = 25;
+
+ optional bool rst_sibling = 26; /* swrk only */
+ repeated inherit_fd inherit_fd = 27; /* swrk only */
+
+ optional bool auto_ext_mnt = 28;
+ optional bool ext_sharing = 29;
+ optional bool ext_masters = 30;
+
+ repeated string skip_mnt = 31;
+ repeated string enable_fs = 32;
+
+ repeated unix_sk unix_sk_ino = 33;
+
+ optional uint32 manage_cgroups_mode = 34;
+}
+
+message criu_dump_resp {
+ optional bool restored = 1;
+}
+
+message criu_restore_resp {
+ required int32 pid = 1;
+}
+
+message criu_notify {
+ optional string script = 1;
+ optional int32 pid = 2;
+}
+
+enum criu_req_type {
+ EMPTY = 0;
+ DUMP = 1;
+ RESTORE = 2;
+ CHECK = 3;
+ PRE_DUMP = 4;
+ PAGE_SERVER = 5;
+
+ NOTIFY = 6;
+
+ CPUINFO_DUMP = 7;
+ CPUINFO_CHECK = 8;
+}
+
+/*
+ * Request -- each request type corresponds to the
+ * must-be-there arguments of the respective type
+ */
+
+message criu_req {
+ required criu_req_type type = 1;
+
+ optional criu_opts opts = 2;
+ optional bool notify_success = 3;
+
+ /*
+ * When set service won't close the connection but
+ * will wait for more req-s to appear. Works not
+ * for all request types.
+ */
+ optional bool keep_open = 4;
+}
+
+/*
+ * Response -- states whether the request was served,
+ * plus additional request-specific information
+ */
+
+message criu_resp {
+ required criu_req_type type = 1;
+ required bool success = 2;
+
+ optional criu_dump_resp dump = 3;
+ optional criu_restore_resp restore = 4;
+ optional criu_notify notify = 5;
+ optional criu_page_server_info ps = 6;
+
+ optional int32 cr_errno = 7;
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
new file mode 100644
index 0000000..c02b73e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
@@ -0,0 +1,102 @@
+// +build linux freebsd
+
+package devices
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var (
+ ErrNotADevice = errors.New("not a device node")
+)
+
+// Testing dependencies
+var (
+ osLstat = os.Lstat
+ ioutilReadDir = ioutil.ReadDir
+)
+
+// DeviceFromPath takes the path to a device and its cgroup_permissions
+// (which cannot be easily queried), looks up the information about the Linux
+// device and returns that information as a configs.Device struct.
+func DeviceFromPath(path, permissions string) (*configs.Device, error) {
+ fileInfo, err := osLstat(path)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ devType rune
+ mode = fileInfo.Mode()
+ fileModePermissionBits = os.FileMode.Perm(mode)
+ )
+ switch {
+ case mode&os.ModeDevice == 0:
+ return nil, ErrNotADevice
+ case mode&os.ModeCharDevice != 0:
+ fileModePermissionBits |= syscall.S_IFCHR
+ devType = 'c'
+ default:
+ fileModePermissionBits |= syscall.S_IFBLK
+ devType = 'b'
+ }
+ stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil, fmt.Errorf("cannot determine the device number for device %s", path)
+ }
+ devNumber := int(stat_t.Rdev)
+ return &configs.Device{
+ Type: devType,
+ Path: path,
+ Major: Major(devNumber),
+ Minor: Minor(devNumber),
+ Permissions: permissions,
+ FileMode: fileModePermissionBits,
+ Uid: stat_t.Uid,
+ Gid: stat_t.Gid,
+ }, nil
+}
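+
+// Illustrative call (device numbers depend on the host):
+//
+//	dev, err := DeviceFromPath("/dev/null", "rwm")
+//	// on a typical Linux box: dev.Type == 'c', dev.Major == 1, dev.Minor == 3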
+
+func HostDevices() ([]*configs.Device, error) {
+ return getDevices("/dev")
+}
+
+func getDevices(path string) ([]*configs.Device, error) {
+ files, err := ioutilReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ out := []*configs.Device{}
+ for _, f := range files {
+ switch {
+ case f.IsDir():
+ switch f.Name() {
+ case "pts", "shm", "fd", "mqueue":
+ continue
+ default:
+ sub, err := getDevices(filepath.Join(path, f.Name()))
+ if err != nil {
+ return nil, err
+ }
+
+ out = append(out, sub...)
+ continue
+ }
+ case f.Name() == "console":
+ continue
+ }
+ device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
+ if err != nil {
+ if err == ErrNotADevice {
+ continue
+ }
+ return nil, err
+ }
+ out = append(out, device)
+ }
+ return out, nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
new file mode 100644
index 0000000..1e84033
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
@@ -0,0 +1,3 @@
+// +build windows
+
+package devices
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/number.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/number.go
new file mode 100644
index 0000000..885b6e5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/devices/number.go
@@ -0,0 +1,24 @@
+// +build linux freebsd
+
+package devices
+
+/*
+
+This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
+
+You can read what they are here:
+
+ - http://www.makelinux.net/ldd3/chp-3-sect-2
+ - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
+
+Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h>, as the representation of device numbers used by Go is different from the one used internally by the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
+
+*/
+
+func Major(devNumber int) int64 {
+ return int64((devNumber >> 8) & 0xfff)
+}
+
+func Minor(devNumber int) int64 {
+ return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
+}
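+
+// Worked example: /dev/sda1 is usually dev number 0x0801, so Major(0x0801)
+// == 8 and Minor(0x0801) == 1. For minors above 255 the extra bits live at
+// bit 20 and up of the dev number; shifting right by 12 places them at bits
+// 8 and up of the returned minor, as the expression in Minor shows.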
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/error.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/error.go
new file mode 100644
index 0000000..378ef46
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/error.go
@@ -0,0 +1,68 @@
+package libcontainer
+
+import "io"
+
+// API error code type.
+type ErrorCode int
+
+// API error codes.
+const (
+ // Factory errors
+ IdInUse ErrorCode = iota
+ InvalidIdFormat
+
+ // Container errors
+ ContainerNotExists
+ ContainerPaused
+ ContainerNotStopped
+ ContainerNotRunning
+ ContainerNotPaused
+
+ // Process errors
+ ProcessNotExecuted
+
+ // Common errors
+ ConfigInvalid
+ ConsoleExists
+ SystemError
+)
+
+func (c ErrorCode) String() string {
+ switch c {
+ case IdInUse:
+ return "Id already in use"
+ case InvalidIdFormat:
+ return "Invalid format"
+ case ContainerPaused:
+ return "Container paused"
+ case ConfigInvalid:
+ return "Invalid configuration"
+ case SystemError:
+ return "System error"
+ case ContainerNotExists:
+ return "Container does not exist"
+ case ContainerNotStopped:
+ return "Container is not stopped"
+ case ContainerNotRunning:
+ return "Container is not running"
+ case ConsoleExists:
+ return "Console exists for process"
+ case ContainerNotPaused:
+ return "Container is not paused"
+ default:
+ return "Unknown error"
+ }
+}
+
+// API Error type.
+type Error interface {
+ error
+
+ // Returns a verbose string including the error message
+ // and a representation of the stack trace suitable for
+ // printing.
+ Detail(w io.Writer) error
+
+ // Returns the error code for this error.
+ Code() ErrorCode
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/factory.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/factory.go
new file mode 100644
index 0000000..f0ccb52
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/factory.go
@@ -0,0 +1,45 @@
+package libcontainer
+
+import (
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Factory interface {
+ // Creates a new container with the given id and starts the initial process inside it.
+ // id must be a string containing only letters, digits and underscores and must contain
+ // between 1 and 1024 characters, inclusive.
+ //
+ // The id must not already be in use by an existing container. Containers created using
+ // a factory with the same path (and file system) must have distinct ids.
+ //
+ // Returns the new container with a running process.
+ //
+ // errors:
+ // IdInUse - id is already in use by a container
+ // InvalidIdFormat - id has incorrect format
+ // ConfigInvalid - config is invalid
+ // SystemError - System error
+ //
+ // On error, any partially created container parts are cleaned up (the operation is atomic).
+ Create(id string, config *configs.Config) (Container, error)
+
+ // Load takes an ID for an existing container and returns the container information
+ // from the state. This presents a read only view of the container.
+ //
+ // errors:
+ // Path does not exist
+ // Container is stopped
+ // System error
+ Load(id string) (Container, error)
+
+ // StartInitialization is an internal API to libcontainer used during the reexec of the
+ // container.
+ //
+ // Errors:
+ // Pipe connection error
+ // System error
+ StartInitialization() error
+
+ // Type returns an info string describing the factory type (e.g. lxc, libcontainer...)
+ Type() string
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/factory_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/factory_linux.go
new file mode 100644
index 0000000..0e4e9df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/factory_linux.go
@@ -0,0 +1,285 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "syscall"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/cgroups/fs"
+ "github.com/opencontainers/runc/libcontainer/cgroups/systemd"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/configs/validate"
+ "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+const (
+ stateFilename = "state.json"
+)
+
+var (
+ idRegex = regexp.MustCompile(`^[\w_-]+$`)
+ maxIdLen = 1024
+)
+
+// InitArgs returns an options func to configure a LinuxFactory with the
+// provided init arguments.
+func InitArgs(args ...string) func(*LinuxFactory) error {
+ return func(l *LinuxFactory) error {
+ name := args[0]
+ if filepath.Base(name) == name {
+ if lp, err := exec.LookPath(name); err == nil {
+ name = lp
+ }
+ } else {
+ abs, err := filepath.Abs(name)
+ if err != nil {
+ return err
+ }
+ name = abs
+ }
+ l.InitPath = "/proc/self/exe"
+ l.InitArgs = append([]string{name}, args[1:]...)
+ return nil
+ }
+}
+
+// InitPath returns an options func to configure a LinuxFactory with the
+// provided absolute path to the init binary and its arguments.
+func InitPath(path string, args ...string) func(*LinuxFactory) error {
+ return func(l *LinuxFactory) error {
+ l.InitPath = path
+ l.InitArgs = args
+ return nil
+ }
+}
+
+// SystemdCgroups is an options func to configure a LinuxFactory to return
+// containers that use systemd to create and manage cgroups.
+func SystemdCgroups(l *LinuxFactory) error {
+ l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
+ return &systemd.Manager{
+ Cgroups: config,
+ Paths: paths,
+ }
+ }
+ return nil
+}
+
+// Cgroupfs is an options func to configure a LinuxFactory to return
+// containers that use the native cgroups filesystem implementation to
+// create and manage cgroups.
+func Cgroupfs(l *LinuxFactory) error {
+ l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
+ return &fs.Manager{
+ Cgroups: config,
+ Paths: paths,
+ }
+ }
+ return nil
+}
+
+// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.
+func TmpfsRoot(l *LinuxFactory) error {
+ mounted, err := mount.Mounted(l.Root)
+ if err != nil {
+ return err
+ }
+ if !mounted {
+ if err := syscall.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// New returns a Linux-based container factory that stores state in the root directory
+// and configures the factory with the provided option funcs.
+func New(root string, options ...func(*LinuxFactory) error) (Factory, error) {
+ if root != "" {
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return nil, newGenericError(err, SystemError)
+ }
+ }
+ l := &LinuxFactory{
+ Root: root,
+ Validator: validate.New(),
+ CriuPath: "criu",
+ }
+ InitArgs(os.Args[0], "init")(l)
+ Cgroupfs(l)
+ for _, opt := range options {
+ if err := opt(l); err != nil {
+ return nil, err
+ }
+ }
+ return l, nil
+}
+
+// LinuxFactory implements the default factory interface for linux based systems.
+type LinuxFactory struct {
+ // Root directory for the factory to store state.
+ Root string
+
+ // InitPath is the absolute path to the init binary.
+ InitPath string
+
+ // InitArgs are the arguments used to invoke the init process that spawns
+ // a container.
+ InitArgs []string
+
+ // CriuPath is the path to the criu binary used for checkpoint and restore of
+ // containers.
+ CriuPath string
+
+ // Validator provides validation to container configurations.
+ Validator validate.Validator
+
+ // NewCgroupsManager returns an initialized cgroups manager for a single container.
+ NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager
+}
+
+func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {
+ if l.Root == "" {
+ return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
+ }
+ if err := l.validateID(id); err != nil {
+ return nil, err
+ }
+ if err := l.Validator.Validate(config); err != nil {
+ return nil, newGenericError(err, ConfigInvalid)
+ }
+ containerRoot := filepath.Join(l.Root, id)
+ if _, err := os.Stat(containerRoot); err == nil {
+ return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse)
+ } else if !os.IsNotExist(err) {
+ return nil, newGenericError(err, SystemError)
+ }
+ if err := os.MkdirAll(containerRoot, 0700); err != nil {
+ return nil, newGenericError(err, SystemError)
+ }
+ c := &linuxContainer{
+ id: id,
+ root: containerRoot,
+ config: config,
+ initPath: l.InitPath,
+ initArgs: l.InitArgs,
+ criuPath: l.CriuPath,
+ cgroupManager: l.NewCgroupsManager(config.Cgroups, nil),
+ }
+ c.state = &stoppedState{c: c}
+ return c, nil
+}
+
+func (l *LinuxFactory) Load(id string) (Container, error) {
+ if l.Root == "" {
+ return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
+ }
+ containerRoot := filepath.Join(l.Root, id)
+ state, err := l.loadState(containerRoot)
+ if err != nil {
+ return nil, err
+ }
+ r := &nonChildProcess{
+ processPid: state.InitProcessPid,
+ processStartTime: state.InitProcessStartTime,
+ fds: state.ExternalDescriptors,
+ }
+ c := &linuxContainer{
+ initProcess: r,
+ id: id,
+ config: &state.Config,
+ initPath: l.InitPath,
+ initArgs: l.InitArgs,
+ criuPath: l.CriuPath,
+ cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),
+ root: containerRoot,
+ }
+ c.state = &createdState{c: c, s: Created}
+ if err := c.refreshState(); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+func (l *LinuxFactory) Type() string {
+ return "libcontainer"
+}
+
+// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state.
+// This is a low-level implementation detail of the reexec and should not be consumed externally.
+func (l *LinuxFactory) StartInitialization() (err error) {
+ fdStr := os.Getenv("_LIBCONTAINER_INITPIPE")
+ pipefd, err := strconv.Atoi(fdStr)
+ if err != nil {
+ return fmt.Errorf("error converting env var _LIBCONTAINER_INITPIPE(%q) to an int: %s", fdStr, err)
+ }
+ var (
+ pipe = os.NewFile(uintptr(pipefd), "pipe")
+ it = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
+ )
+ // clear the current process's environment to clean any libcontainer
+ // specific env vars.
+ os.Clearenv()
+ var i initer
+ defer func() {
+ // if we have an error during the initialization of the container's init then send it back to the
+ // parent process in the form of an initError.
+ if err != nil {
+ if _, ok := i.(*linuxStandardInit); ok {
+ // Synchronisation only necessary for standard init.
+ if err := utils.WriteJSON(pipe, syncT{procError}); err != nil {
+ panic(err)
+ }
+ }
+ if err := utils.WriteJSON(pipe, newSystemError(err)); err != nil {
+ panic(err)
+ }
+ } else {
+ if err := utils.WriteJSON(pipe, syncT{procStart}); err != nil {
+ panic(err)
+ }
+ }
+ // ensure that this pipe is always closed
+ pipe.Close()
+ }()
+ i, err = newContainerInit(it, pipe)
+ if err != nil {
+ return err
+ }
+ return i.Init()
+}
+
+func (l *LinuxFactory) loadState(root string) (*State, error) {
+ f, err := os.Open(filepath.Join(root, stateFilename))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, newGenericError(err, ContainerNotExists)
+ }
+ return nil, newGenericError(err, SystemError)
+ }
+ defer f.Close()
+ var state *State
+ if err := json.NewDecoder(f).Decode(&state); err != nil {
+ return nil, newGenericError(err, SystemError)
+ }
+ return state, nil
+}
+
+func (l *LinuxFactory) validateID(id string) error {
+ if !idRegex.MatchString(id) {
+ return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat)
+ }
+ if len(id) > maxIdLen {
+ return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat)
+ }
+ return nil
+}
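
New applies defaults first (InitArgs(os.Args[0], "init") and Cgroupfs), then each option func in order, so later options override the defaults. A sketch of a typical construction, assuming a hypothetical writable state directory:

    package main

    import (
        "log"
        "os"

        "github.com/opencontainers/runc/libcontainer"
    )

    func newFactory() libcontainer.Factory {
        factory, err := libcontainer.New(
            "/var/run/mycontainers", // hypothetical state root, created 0700 if missing
            libcontainer.Cgroupfs,   // explicit here, though it is already the default
            libcontainer.InitArgs(os.Args[0], "init"),
        )
        if err != nil {
            log.Fatal(err)
        }
        return factory
    }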
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/generic_error.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/generic_error.go
new file mode 100644
index 0000000..924d637
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/generic_error.go
@@ -0,0 +1,87 @@
+package libcontainer
+
+import (
+ "fmt"
+ "io"
+ "text/template"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/stacktrace"
+)
+
+type syncType uint8
+
+const (
+ procReady syncType = iota
+ procError
+ procStart
+ procRun
+)
+
+type syncT struct {
+ Type syncType `json:"type"`
+}
+
+var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}}
+Code: {{.ECode}}
+{{if .Message }}
+Message: {{.Message}}
+{{end}}
+Frames:{{range $i, $frame := .Stack.Frames}}
+---
+{{$i}}: {{$frame.Function}}
+Package: {{$frame.Package}}
+File: {{$frame.File}}@{{$frame.Line}}{{end}}
+`))
+
+func newGenericError(err error, c ErrorCode) Error {
+ if le, ok := err.(Error); ok {
+ return le
+ }
+ gerr := &genericError{
+ Timestamp: time.Now(),
+ Err: err,
+ ECode: c,
+ Stack: stacktrace.Capture(1),
+ }
+ if err != nil {
+ gerr.Message = err.Error()
+ }
+ return gerr
+}
+
+func newSystemError(err error) Error {
+ if le, ok := err.(Error); ok {
+ return le
+ }
+ gerr := &genericError{
+ Timestamp: time.Now(),
+ Err: err,
+ ECode: SystemError,
+ Stack: stacktrace.Capture(1),
+ }
+ if err != nil {
+ gerr.Message = err.Error()
+ }
+ return gerr
+}
+
+type genericError struct {
+ Timestamp time.Time
+ ECode ErrorCode
+ Err error `json:"-"`
+ Message string
+ Stack stacktrace.Stacktrace
+}
+
+func (e *genericError) Error() string {
+ return fmt.Sprintf("[%d] %s: %s", e.ECode, e.ECode, e.Message)
+}
+
+func (e *genericError) Code() ErrorCode {
+ return e.ECode
+}
+
+func (e *genericError) Detail(w io.Writer) error {
+ return errorTemplate.Execute(w, e)
+}
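
In practice callers branch on Code() and use Detail for diagnostics. A short sketch under that assumption (the writer and handling policy are illustrative):

    package main

    import (
        "fmt"
        "io"

        "github.com/opencontainers/runc/libcontainer"
    )

    func report(w io.Writer, err error) {
        if lerr, ok := err.(libcontainer.Error); ok {
            fmt.Fprintf(w, "libcontainer error code %d\n", lerr.Code())
            lerr.Detail(w) // renders the Timestamp/Code/Message/Frames template above
            return
        }
        fmt.Fprintln(w, err)
    }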
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/init_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/init_linux.go
new file mode 100644
index 0000000..918f103
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/init_linux.go
@@ -0,0 +1,357 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/opencontainers/runc/libcontainer/utils"
+ "github.com/vishvananda/netlink"
+)
+
+type initType string
+
+const (
+ initSetns initType = "setns"
+ initStandard initType = "standard"
+)
+
+type pid struct {
+ Pid int `json:"pid"`
+}
+
+// network is an internal struct used to setup container networks.
+type network struct {
+ configs.Network
+
+ // TempVethPeerName is a unique temporary veth peer name that was placed into
+ // the container's namespace.
+ TempVethPeerName string `json:"temp_veth_peer_name"`
+}
+
+// initConfig is used for transferring parameters from Exec() to Init()
+type initConfig struct {
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Cwd string `json:"cwd"`
+ Capabilities []string `json:"capabilities"`
+ User string `json:"user"`
+ Config *configs.Config `json:"config"`
+ Console string `json:"console"`
+ Networks []*network `json:"network"`
+ PassedFilesCount int `json:"passed_files_count"`
+}
+
+type initer interface {
+ Init() error
+}
+
+func newContainerInit(t initType, pipe *os.File) (initer, error) {
+ var config *initConfig
+ if err := json.NewDecoder(pipe).Decode(&config); err != nil {
+ return nil, err
+ }
+ if err := populateProcessEnvironment(config.Env); err != nil {
+ return nil, err
+ }
+ switch t {
+ case initSetns:
+ return &linuxSetnsInit{
+ config: config,
+ }, nil
+ case initStandard:
+ return &linuxStandardInit{
+ pipe: pipe,
+ parentPid: syscall.Getppid(),
+ config: config,
+ }, nil
+ }
+ return nil, fmt.Errorf("unknown init type %q", t)
+}
+
+// populateProcessEnvironment loads the provided environment variables into the
+// current process's environment.
+func populateProcessEnvironment(env []string) error {
+ for _, pair := range env {
+ p := strings.SplitN(pair, "=", 2)
+ if len(p) < 2 {
+ return fmt.Errorf("invalid environment '%v'", pair)
+ }
+ if err := os.Setenv(p[0], p[1]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// finalizeNamespace drops the caps, sets the correct user
+// and working dir, and closes any leaked file descriptors
+// before executing the command inside the namespace
+func finalizeNamespace(config *initConfig) error {
+ // Ensure that all unwanted fds we may have accidentally
+ // inherited are marked close-on-exec so they stay out of the
+ // container
+ if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
+ return err
+ }
+
+ capabilities := config.Config.Capabilities
+ if config.Capabilities != nil {
+ capabilities = config.Capabilities
+ }
+ w, err := newCapWhitelist(capabilities)
+ if err != nil {
+ return err
+ }
+ // drop capabilities in bounding set before changing user
+ if err := w.dropBoundingSet(); err != nil {
+ return err
+ }
+ // preserve existing capabilities while we change users
+ if err := system.SetKeepCaps(); err != nil {
+ return err
+ }
+ if err := setupUser(config); err != nil {
+ return err
+ }
+ if err := system.ClearKeepCaps(); err != nil {
+ return err
+ }
+ // drop all other capabilities
+ if err := w.drop(); err != nil {
+ return err
+ }
+ if config.Cwd != "" {
+ if err := syscall.Chdir(config.Cwd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// syncParentReady sends to the given pipe a JSON payload which indicates that
+// the init is ready to Exec the child process. It then waits for the parent to
+// indicate that it is cleared to Exec.
+func syncParentReady(pipe io.ReadWriter) error {
+ // Tell parent.
+ if err := utils.WriteJSON(pipe, syncT{procReady}); err != nil {
+ return err
+ }
+ // Wait for parent to give the all-clear.
+ var procSync syncT
+ if err := json.NewDecoder(pipe).Decode(&procSync); err != nil {
+ if err == io.EOF {
+ return fmt.Errorf("parent closed synchronisation channel")
+ }
+ return err
+ }
+ if procSync.Type != procRun {
+ return fmt.Errorf("invalid synchronisation flag from parent")
+ }
+ return nil
+}
+
+// joinExistingNamespaces gets all the namespace paths specified for the container and
+// does a setns on the namespace fd so that the current process joins the namespace.
+func joinExistingNamespaces(namespaces []configs.Namespace) error {
+ for _, ns := range namespaces {
+ if ns.Path != "" {
+ f, err := os.OpenFile(ns.Path, os.O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+ err = system.Setns(f.Fd(), uintptr(ns.Syscall()))
+ f.Close()
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// setupUser changes the groups, gid, and uid for the user inside the container
+func setupUser(config *initConfig) error {
+ // Set up defaults.
+ defaultExecUser := user.ExecUser{
+ Uid: syscall.Getuid(),
+ Gid: syscall.Getgid(),
+ Home: "/",
+ }
+ passwdPath, err := user.GetPasswdPath()
+ if err != nil {
+ return err
+ }
+ groupPath, err := user.GetGroupPath()
+ if err != nil {
+ return err
+ }
+ execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath)
+ if err != nil {
+ return err
+ }
+
+ var addGroups []int
+ if len(config.Config.AdditionalGroups) > 0 {
+ addGroups, err = user.GetAdditionalGroupsPath(config.Config.AdditionalGroups, groupPath)
+ if err != nil {
+ return err
+ }
+ }
+ // before we change to the container's user, make sure that the process's STDIO
+ // is correctly owned by the user that we are switching to.
+ if err := fixStdioPermissions(execUser); err != nil {
+ return err
+ }
+ suppGroups := append(execUser.Sgids, addGroups...)
+ if err := syscall.Setgroups(suppGroups); err != nil {
+ return err
+ }
+
+ if err := system.Setgid(execUser.Gid); err != nil {
+ return err
+ }
+ if err := system.Setuid(execUser.Uid); err != nil {
+ return err
+ }
+ // if we didn't get HOME already, set it based on the user's HOME
+ if envHome := os.Getenv("HOME"); envHome == "" {
+ if err := os.Setenv("HOME", execUser.Home); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user.
+// The ownership needs to match because it is created outside of the container and needs to be
+// localized.
+func fixStdioPermissions(u *user.ExecUser) error {
+ var null syscall.Stat_t
+ if err := syscall.Stat("/dev/null", &null); err != nil {
+ return err
+ }
+ for _, fd := range []uintptr{
+ os.Stdin.Fd(),
+ os.Stderr.Fd(),
+ os.Stdout.Fd(),
+ } {
+ var s syscall.Stat_t
+ if err := syscall.Fstat(int(fd), &s); err != nil {
+ return err
+ }
+ // skip chown of /dev/null if it was used as one of the STDIO fds.
+ if s.Rdev == null.Rdev {
+ continue
+ }
+ if err := syscall.Fchown(int(fd), u.Uid, u.Gid); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setupNetwork sets up and initializes any network interface inside the container.
+func setupNetwork(config *initConfig) error {
+ for _, config := range config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+ if err := strategy.initialize(config); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setupRoute(config *configs.Config) error {
+ for _, config := range config.Routes {
+ _, dst, err := net.ParseCIDR(config.Destination)
+ if err != nil {
+ return err
+ }
+ src := net.ParseIP(config.Source)
+ if src == nil {
+ return fmt.Errorf("Invalid source for route: %s", config.Source)
+ }
+ gw := net.ParseIP(config.Gateway)
+ if gw == nil {
+ return fmt.Errorf("Invalid gateway for route: %s", config.Gateway)
+ }
+ l, err := netlink.LinkByName(config.InterfaceName)
+ if err != nil {
+ return err
+ }
+ route := &netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ Dst: dst,
+ Src: src,
+ Gw: gw,
+ LinkIndex: l.Attrs().Index,
+ }
+ if err := netlink.RouteAdd(route); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setupRlimits(config *configs.Config) error {
+ for _, rlimit := range config.Rlimits {
+ l := &syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}
+ if err := syscall.Setrlimit(rlimit.Type, l); err != nil {
+ return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err)
+ }
+ }
+ return nil
+}
+
+func setOomScoreAdj(oomScoreAdj int) error {
+ path := "/proc/self/oom_score_adj"
+ return ioutil.WriteFile(path, []byte(strconv.Itoa(oomScoreAdj)), 0700)
+}
+
+// killCgroupProcesses freezes then iterates over all the processes inside the
+// manager's cgroups sending a SIGKILL to each process then waiting for them to
+// exit.
+func killCgroupProcesses(m cgroups.Manager) error {
+ var procs []*os.Process
+ if err := m.Freeze(configs.Frozen); err != nil {
+ logrus.Warn(err)
+ }
+ pids, err := m.GetAllPids()
+ if err != nil {
+ m.Freeze(configs.Thawed)
+ return err
+ }
+ for _, pid := range pids {
+ if p, err := os.FindProcess(pid); err == nil {
+ procs = append(procs, p)
+ if err := p.Kill(); err != nil {
+ logrus.Warn(err)
+ }
+ }
+ }
+ if err := m.Freeze(configs.Thawed); err != nil {
+ logrus.Warn(err)
+ }
+ for _, p := range procs {
+ if _, err := p.Wait(); err != nil {
+ logrus.Warn(err)
+ }
+ }
+ return nil
+}
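
The syncT handshake defined earlier in this file works as follows: the child sends procReady, the parent applies cgroup settings and replies procRun, and the child later reports procStart (or procError followed by a genericError payload). A sketch of the parent side of the exchange; these names are package-internal, so this lives inside package libcontainer, and parentPipe is assumed to be the parent's end of the init pipe:

    var procSync syncT
    if err := json.NewDecoder(parentPipe).Decode(&procSync); err != nil {
        return err // io.EOF means the child exited before signalling readiness
    }
    if procSync.Type == procReady {
        // Safe point: apply cgroup limits before releasing the child to exec.
        if err := utils.WriteJSON(parentPipe, syncT{procRun}); err != nil {
            return err
        }
    }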
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/integration/doc.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/integration/doc.go
new file mode 100644
index 0000000..87545bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/integration/doc.go
@@ -0,0 +1,2 @@
+// Package integration is used for integration testing of libcontainer
+package integration
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/label/label.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/label/label.go
new file mode 100644
index 0000000..97dc6ba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/label/label.go
@@ -0,0 +1,76 @@
+// +build !selinux !linux
+
+package label
+
+// InitLabels returns the process label and file labels to be used within
+// the container. A list of options can be passed into this function to alter
+// the labels.
+func InitLabels(options []string) (string, string, error) {
+ return "", "", nil
+}
+
+func GenLabels(options string) (string, string, error) {
+ return "", "", nil
+}
+
+func FormatMountLabel(src string, mountLabel string) string {
+ return src
+}
+
+func SetProcessLabel(processLabel string) error {
+ return nil
+}
+
+func SetFileLabel(path string, fileLabel string) error {
+ return nil
+}
+
+func SetFileCreateLabel(fileLabel string) error {
+ return nil
+}
+
+func Relabel(path string, fileLabel string, shared bool) error {
+ return nil
+}
+
+func GetPidLabel(pid int) (string, error) {
+ return "", nil
+}
+
+func Init() {
+}
+
+func ReserveLabel(label string) error {
+ return nil
+}
+
+func UnreserveLabel(label string) error {
+ return nil
+}
+
+// DupSecOpt takes a process label and returns security options that
+// can be used to set duplicate labels on future container processes
+func DupSecOpt(src string) []string {
+ return nil
+}
+
+// DisableSecOpt returns a security opt that can disable labeling
+// support for future container processes
+func DisableSecOpt() []string {
+ return nil
+}
+
+// Validate checks that the label does not include unexpected options
+func Validate(label string) error {
+ return nil
+}
+
+// RelabelNeeded checks whether the user requested a relabel
+func RelabelNeeded(label string) bool {
+ return false
+}
+
+// IsShared checks that the label includes a "shared" mark
+func IsShared(label string) bool {
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/label/label_selinux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/label/label_selinux.go
new file mode 100644
index 0000000..e561cbf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/label/label_selinux.go
@@ -0,0 +1,192 @@
+// +build selinux,linux
+
+package label
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/selinux"
+)
+
+// Valid Label Options
+var validOptions = map[string]bool{
+ "disable": true,
+ "type": true,
+ "user": true,
+ "role": true,
+ "level": true,
+}
+
+var ErrIncompatibleLabel = fmt.Errorf("Bad SELinux option z and Z can not be used together")
+
+// InitLabels returns the process label and file labels to be used within
+// the container. A list of options can be passed into this function to alter
+// the labels. The labels returned will include a random MCS String, that is
+// guaranteed to be unique.
+func InitLabels(options []string) (string, string, error) {
+ if !selinux.SelinuxEnabled() {
+ return "", "", nil
+ }
+ processLabel, mountLabel := selinux.GetLxcContexts()
+ if processLabel != "" {
+ pcon := selinux.NewContext(processLabel)
+ mcon := selinux.NewContext(mountLabel)
+ for _, opt := range options {
+ if opt == "disable" {
+ return "", "", nil
+ }
+ if i := strings.Index(opt, ":"); i == -1 {
+ return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type' followed by ':' and a value", opt)
+ }
+ con := strings.SplitN(opt, ":", 2)
+ if !validOptions[con[0]] {
+ return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type'", con[0])
+
+ }
+ pcon[con[0]] = con[1]
+ if con[0] == "level" || con[0] == "user" {
+ mcon[con[0]] = con[1]
+ }
+ }
+ processLabel = pcon.Get()
+ mountLabel = mcon.Get()
+ }
+ return processLabel, mountLabel, nil
+}
+
+// DEPRECATED: The GenLabels function is only to be used during the transition to the official API.
+func GenLabels(options string) (string, string, error) {
+ return InitLabels(strings.Fields(options))
+}
+
+// FormatMountLabel returns a string to be used by the mount command.
+// The format of this string will be used to alter the labeling of the mountpoint.
+// The string returned is suitable to be used as the options field of the mount command.
+// If you need to have additional mount point options, you can pass them in as
+// the first parameter. Second parameter is the label that you wish to apply
+// to all content in the mount point.
+func FormatMountLabel(src, mountLabel string) string {
+ if mountLabel != "" {
+ switch src {
+ case "":
+ src = fmt.Sprintf("context=%q", mountLabel)
+ default:
+ src = fmt.Sprintf("%s,context=%q", src, mountLabel)
+ }
+ }
+ return src
+}
+
+// SetProcessLabel takes a process label and tells the kernel to assign the
+// label to the next program executed by the current process.
+func SetProcessLabel(processLabel string) error {
+ if processLabel == "" {
+ return nil
+ }
+ return selinux.Setexeccon(processLabel)
+}
+
+// GetProcessLabel returns the process label that the kernel will assign
+// to the next program executed by the current process. If "" is returned
+// this indicates that the default labeling will happen for the process.
+func GetProcessLabel() (string, error) {
+ return selinux.Getexeccon()
+}
+
+// SetFileLabel modifies the "path" label to the specified file label
+func SetFileLabel(path string, fileLabel string) error {
+ if selinux.SelinuxEnabled() && fileLabel != "" {
+ return selinux.Setfilecon(path, fileLabel)
+ }
+ return nil
+}
+
+// SetFileCreateLabel tells the kernel the label to apply to all newly created files
+func SetFileCreateLabel(fileLabel string) error {
+ if selinux.SelinuxEnabled() {
+ return selinux.Setfscreatecon(fileLabel)
+ }
+ return nil
+}
+
+// Relabel changes the label of path to the fileLabel string.
+// It changes the MCS label to s0 if shared is true,
+// which allows all containers to share the content.
+func Relabel(path string, fileLabel string, shared bool) error {
+ if !selinux.SelinuxEnabled() {
+ return nil
+ }
+
+ if fileLabel == "" {
+ return nil
+ }
+
+ exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true}
+ if exclude_paths[path] {
+ return fmt.Errorf("Relabeling of %s is not allowed", path)
+ }
+
+ if shared {
+ c := selinux.NewContext(fileLabel)
+ c["level"] = "s0"
+ fileLabel = c.Get()
+ }
+ return selinux.Chcon(path, fileLabel, true)
+}
+
+// GetPidLabel will return the label of the process running with the specified pid
+func GetPidLabel(pid int) (string, error) {
+ return selinux.Getpidcon(pid)
+}
+
+// Init initialises the labeling system
+func Init() {
+ selinux.SelinuxEnabled()
+}
+
+// ReserveLabel will record the fact that the MCS label has already been used.
+// This will prevent InitLabels from using the MCS label in a newly created
+// container
+func ReserveLabel(label string) error {
+ selinux.ReserveLabel(label)
+ return nil
+}
+
+// UnreserveLabel will remove the reservation of the MCS label.
+// This will allow InitLabels to use the MCS label in a newly created
+// containers
+func UnreserveLabel(label string) error {
+ selinux.FreeLxcContexts(label)
+ return nil
+}
+
+// DupSecOpt takes a process label and returns security options that
+// can be used to set duplicate labels on future container processes
+func DupSecOpt(src string) []string {
+ return selinux.DupSecOpt(src)
+}
+
+// DisableSecOpt returns a security opt that can disable labeling
+// support for future container processes
+func DisableSecOpt() []string {
+ return selinux.DisableSecOpt()
+}
+
+// Validate checks that the label does not include unexpected options
+func Validate(label string) error {
+ if strings.Contains(label, "z") && strings.Contains(label, "Z") {
+ return ErrIncompatibleLabel
+ }
+ return nil
+}
+
+// RelabelNeeded checks whether the user requested a relabel
+func RelabelNeeded(label string) bool {
+ return strings.Contains(label, "z") || strings.Contains(label, "Z")
+}
+
+// IsShared checks that the label includes a "shared" mark
+func IsShared(label string) bool {
+ return strings.Contains(label, "z")
+}
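
A sketch of driving InitLabels and FormatMountLabel together; the option values are hypothetical and only take effect when SELinux is enabled:

    // Hypothetical option values; ignored entirely when SELinux is disabled.
    processLabel, mountLabel, err := label.InitLabels([]string{
        "user:system_u",
        "level:s0:c100,c200",
    })
    if err != nil {
        return err // e.g. an unknown option key such as "foo:bar"
    }
    // Carry the mount label into mount(8)-style options: `ro,context="..."`.
    opts := label.FormatMountLabel("ro", mountLabel)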
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/message_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/message_linux.go
new file mode 100644
index 0000000..0c3301f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/message_linux.go
@@ -0,0 +1,62 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// list of known message types we want to send to bootstrap program
+// The number is randomly chosen to not conflict with known netlink types
+const (
+ InitMsg uint16 = 62000
+ PidAttr uint16 = 27281
+ ConsolePathAttr uint16 = 27282
+ // When syscall.NLA_HDRLEN is in gccgo, take this out.
+ syscall_NLA_HDRLEN = (syscall.SizeofNlAttr + syscall.NLA_ALIGNTO - 1) & ^(syscall.NLA_ALIGNTO - 1)
+)
+
+type Int32msg struct {
+ Type uint16
+ Value uint32
+}
+
+// Int32msg has the following representation
+// | nlattr len | nlattr type |
+// | uint32 value |
+func (msg *Int32msg) Serialize() []byte {
+ buf := make([]byte, msg.Len())
+ native := nl.NativeEndian()
+ native.PutUint16(buf[0:2], uint16(msg.Len()))
+ native.PutUint16(buf[2:4], msg.Type)
+ native.PutUint32(buf[4:8], msg.Value)
+ return buf
+}
+
+func (msg *Int32msg) Len() int {
+ return syscall_NLA_HDRLEN + 4
+}
+
+// Bytemsg has the following representation
+// | nlattr len | nlattr type |
+// | value | pad |
+type Bytemsg struct {
+ Type uint16
+ Value []byte
+}
+
+func (msg *Bytemsg) Serialize() []byte {
+ l := msg.Len()
+ buf := make([]byte, (l+syscall.NLA_ALIGNTO-1) & ^(syscall.NLA_ALIGNTO-1))
+ native := nl.NativeEndian()
+ native.PutUint16(buf[0:2], uint16(l))
+ native.PutUint16(buf[2:4], msg.Type)
+ copy(buf[4:], msg.Value)
+ return buf
+}
+
+func (msg *Bytemsg) Len() int {
+ return syscall_NLA_HDRLEN + len(msg.Value) + 1 // null-terminated
+}
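
These serialized attributes are concatenated into the netlink payload that nsexec.c parses during bootstrap. A sketch of building the pid attribute (the pid value here is an assumption):

    // Build the pid attribute of the bootstrap payload.
    msg := &libcontainer.Int32msg{
        Type:  libcontainer.PidAttr,
        Value: uint32(12345),
    }
    payload := msg.Serialize() // nlattr header (len, type) followed by the value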
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/network_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/network_linux.go
new file mode 100644
index 0000000..5075bee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/network_linux.go
@@ -0,0 +1,259 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/utils"
+ "github.com/vishvananda/netlink"
+)
+
+var strategies = map[string]networkStrategy{
+ "veth": &veth{},
+ "loopback": &loopback{},
+}
+
+// networkStrategy represents a specific network configuration for
+// a container's networking stack
+type networkStrategy interface {
+ create(*network, int) error
+ initialize(*network) error
+ detach(*configs.Network) error
+ attach(*configs.Network) error
+}
+
+// getStrategy returns the specific network strategy for the
+// provided type.
+func getStrategy(tpe string) (networkStrategy, error) {
+ s, exists := strategies[tpe]
+ if !exists {
+ return nil, fmt.Errorf("unknown strategy type %q", tpe)
+ }
+ return s, nil
+}
+
+// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
+func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) {
+ out := &NetworkInterface{Name: interfaceName}
+ // This can happen if the network runtime information is missing - possible if the
+ // container was created by an old version of libcontainer.
+ if interfaceName == "" {
+ return out, nil
+ }
+ type netStatsPair struct {
+ // Where to write the output.
+ Out *uint64
+ // The network stats file to read.
+ File string
+ }
+ // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
+ netStats := []netStatsPair{
+ {Out: &out.RxBytes, File: "tx_bytes"},
+ {Out: &out.RxPackets, File: "tx_packets"},
+ {Out: &out.RxErrors, File: "tx_errors"},
+ {Out: &out.RxDropped, File: "tx_dropped"},
+
+ {Out: &out.TxBytes, File: "rx_bytes"},
+ {Out: &out.TxPackets, File: "rx_packets"},
+ {Out: &out.TxErrors, File: "rx_errors"},
+ {Out: &out.TxDropped, File: "rx_dropped"},
+ }
+ for _, netStat := range netStats {
+ data, err := readSysfsNetworkStats(interfaceName, netStat.File)
+ if err != nil {
+ return nil, err
+ }
+ *(netStat.Out) = data
+ }
+ return out, nil
+}
+
+// Reads the specified statistics available under /sys/class/net/<ethInterface>/statistics
+func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// loopback is a network strategy that provides a basic loopback device
+type loopback struct {
+}
+
+func (l *loopback) create(n *network, nspid int) error {
+ return nil
+}
+
+func (l *loopback) initialize(config *network) error {
+ return netlink.LinkSetUp(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "lo"}})
+}
+
+func (l *loopback) attach(n *configs.Network) (err error) {
+ return nil
+}
+
+func (l *loopback) detach(n *configs.Network) (err error) {
+ return nil
+}
+
+// veth is a network strategy that uses a bridge and creates
+// a veth pair, one that is attached to the bridge on the host and the other
+// is placed inside the container's namespace
+type veth struct {
+}
+
+func (v *veth) detach(n *configs.Network) (err error) {
+ return netlink.LinkSetMaster(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: n.HostInterfaceName}}, nil)
+}
+
+// attach a container network interface to an external network
+func (v *veth) attach(n *configs.Network) (err error) {
+ brl, err := netlink.LinkByName(n.Bridge)
+ if err != nil {
+ return err
+ }
+ br, ok := brl.(*netlink.Bridge)
+ if !ok {
+ return fmt.Errorf("Wrong device type %T", brl)
+ }
+ host, err := netlink.LinkByName(n.HostInterfaceName)
+ if err != nil {
+ return err
+ }
+
+ if err := netlink.LinkSetMaster(host, br); err != nil {
+ return err
+ }
+ if err := netlink.LinkSetMTU(host, n.Mtu); err != nil {
+ return err
+ }
+ if n.HairpinMode {
+ if err := netlink.LinkSetHairpin(host, true); err != nil {
+ return err
+ }
+ }
+ if err := netlink.LinkSetUp(host); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (v *veth) create(n *network, nspid int) (err error) {
+ tmpName, err := v.generateTempPeerName()
+ if err != nil {
+ return err
+ }
+ n.TempVethPeerName = tmpName
+ if n.Bridge == "" {
+ return fmt.Errorf("bridge is not specified")
+ }
+ veth := &netlink.Veth{
+ LinkAttrs: netlink.LinkAttrs{
+ Name: n.HostInterfaceName,
+ TxQLen: n.TxQueueLen,
+ },
+ PeerName: n.TempVethPeerName,
+ }
+ if err := netlink.LinkAdd(veth); err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ netlink.LinkDel(veth)
+ }
+ }()
+ if err := v.attach(&n.Network); err != nil {
+ return err
+ }
+ child, err := netlink.LinkByName(n.TempVethPeerName)
+ if err != nil {
+ return err
+ }
+ return netlink.LinkSetNsPid(child, nspid)
+}
+
+func (v *veth) generateTempPeerName() (string, error) {
+ return utils.GenerateRandomName("veth", 7)
+}
+
+func (v *veth) initialize(config *network) error {
+ peer := config.TempVethPeerName
+ if peer == "" {
+ return fmt.Errorf("peer is not specified")
+ }
+ child, err := netlink.LinkByName(peer)
+ if err != nil {
+ return err
+ }
+ if err := netlink.LinkSetDown(child); err != nil {
+ return err
+ }
+ if err := netlink.LinkSetName(child, config.Name); err != nil {
+ return err
+ }
+ // get the interface again after we changed the name as the index also changes.
+ if child, err = netlink.LinkByName(config.Name); err != nil {
+ return err
+ }
+ if config.MacAddress != "" {
+ mac, err := net.ParseMAC(config.MacAddress)
+ if err != nil {
+ return err
+ }
+ if err := netlink.LinkSetHardwareAddr(child, mac); err != nil {
+ return err
+ }
+ }
+ ip, err := netlink.ParseAddr(config.Address)
+ if err != nil {
+ return err
+ }
+ if err := netlink.AddrAdd(child, ip); err != nil {
+ return err
+ }
+ if config.IPv6Address != "" {
+ ip6, err := netlink.ParseAddr(config.IPv6Address)
+ if err != nil {
+ return err
+ }
+ if err := netlink.AddrAdd(child, ip6); err != nil {
+ return err
+ }
+ }
+ if err := netlink.LinkSetMTU(child, config.Mtu); err != nil {
+ return err
+ }
+ if err := netlink.LinkSetUp(child); err != nil {
+ return err
+ }
+ if config.Gateway != "" {
+ gw := net.ParseIP(config.Gateway)
+ if err := netlink.RouteAdd(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: child.Attrs().Index,
+ Gw: gw,
+ }); err != nil {
+ return err
+ }
+ }
+ if config.IPv6Gateway != "" {
+ gw := net.ParseIP(config.IPv6Gateway)
+ if err := netlink.RouteAdd(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: child.Attrs().Index,
+ Gw: gw,
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
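
setupNetwork and the factory's createNetworkInterfaces both dispatch through getStrategy. A package-internal sketch of that flow, where cfg (a *configs.Network) and nspid are assumptions:

    strategy, err := getStrategy(cfg.Type) // "veth" or "loopback"
    if err != nil {
        return err
    }
    n := &network{Network: *cfg}
    // Host side: create the veth pair and move the temp peer into nspid's netns;
    // strategy.initialize(n) later renames and configures it inside the namespace.
    if err := strategy.create(n, nspid); err != nil {
        return err
    }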
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/notify_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/notify_linux.go
new file mode 100644
index 0000000..839a50c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/notify_linux.go
@@ -0,0 +1,89 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+const oomCgroupName = "memory"
+
+type PressureLevel uint
+
+const (
+ LowPressure PressureLevel = iota
+ MediumPressure
+ CriticalPressure
+)
+
+func registerMemoryEvent(cgDir string, evName string, arg string) (<-chan struct{}, error) {
+ evFile, err := os.Open(filepath.Join(cgDir, evName))
+ if err != nil {
+ return nil, err
+ }
+ fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
+ if syserr != 0 {
+ evFile.Close()
+ return nil, syserr
+ }
+
+ eventfd := os.NewFile(fd, "eventfd")
+
+ eventControlPath := filepath.Join(cgDir, "cgroup.event_control")
+ data := fmt.Sprintf("%d %d %s", eventfd.Fd(), evFile.Fd(), arg)
+ if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil {
+ eventfd.Close()
+ evFile.Close()
+ return nil, err
+ }
+ ch := make(chan struct{})
+ go func() {
+ defer func() {
+ close(ch)
+ eventfd.Close()
+ evFile.Close()
+ }()
+ buf := make([]byte, 8)
+ for {
+ if _, err := eventfd.Read(buf); err != nil {
+ return
+ }
+ // When a cgroup is destroyed, an event is sent to eventfd.
+ // So if the control path is gone, return instead of notifying.
+ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
+ return
+ }
+ ch <- struct{}{}
+ }
+ }()
+ return ch, nil
+}
+
+// notifyOnOOM returns a channel on which OOM events can be received.
+// If the process dies without an OOM, the channel is closed.
+func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
+ dir := paths[oomCgroupName]
+ if dir == "" {
+ return nil, fmt.Errorf("path %q missing", oomCgroupName)
+ }
+
+ return registerMemoryEvent(dir, "memory.oom_control", "")
+}
+
+func notifyMemoryPressure(paths map[string]string, level PressureLevel) (<-chan struct{}, error) {
+ dir := paths[oomCgroupName]
+ if dir == "" {
+ return nil, fmt.Errorf("path %q missing", oomCgroupName)
+ }
+
+ if level > CriticalPressure {
+ return nil, fmt.Errorf("invalid pressure level %d", level)
+ }
+
+ levelStr := []string{"low", "medium", "critical"}[level]
+ return registerMemoryEvent(dir, "memory.pressure_level", levelStr)
+}
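
A sketch of consuming these notifications, assuming paths comes from the container's cgroup manager and that logging each event is the chosen policy:

    // Inside package libcontainer.
    ooms, err := notifyOnOOM(paths) // requires paths["memory"] to be set
    if err != nil {
        return err
    }
    go func() {
        for range ooms {
            logrus.Warn("container received an OOM notification")
        }
        // Channel closed: the eventfd read failed or the cgroup was removed.
    }()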
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/README.md b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/README.md
new file mode 100644
index 0000000..d1a60ef
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/README.md
@@ -0,0 +1,25 @@
+## nsenter
+
+The `nsenter` package registers a special init constructor that is called before
+the Go runtime has a chance to boot. This gives us the ability to `setns` into
+existing namespaces and avoids the issues that the Go runtime has with multiple
+threads. The constructor runs whenever this package is imported into a Go
+application.
+
+The `nsenter` package uses `import "C"` and the [cgo](https://golang.org/cmd/cgo/)
+package. In cgo, if the import of "C" is immediately preceded by a comment, that
+comment, called the preamble, is used as a header when compiling the C parts of
+the package. So whenever package `nsenter` is imported, the C function `nsexec()`
+runs first. The package is currently imported only by the Docker execdriver, so
+the C code runs before every `execdriver.Exec()` call.
+
+`nsexec()` first checks the environment variable `_LIBCONTAINER_INITPID`,
+which gives the pid of the container process to be joined. The namespace fds are
+found under `/proc/[pid]/ns` and joined with the `setns` syscall.
+
+It then reads the pipe number from `_LIBCONTAINER_INITPIPE`; error messages can
+be transferred back through this pipe. If a tty is requested,
+`_LIBCONTAINER_CONSOLE_PATH` holds the console path and a console is set up for output.
+
+Finally, `nsexec()` clones a child process, exits the parent process, and lets
+the Go runtime take over.
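
A sketch of the opt-in pattern the README describes: a binary blank-imports nsenter so the constructor runs on reexec, then hands control to StartInitialization. The "init" argument convention here is an assumption mirroring how runc itself wires this up:

    package main

    import (
        "os"
        "runtime"

        "github.com/opencontainers/runc/libcontainer"
        _ "github.com/opencontainers/runc/libcontainer/nsenter"
    )

    func init() {
        if len(os.Args) > 1 && os.Args[1] == "init" {
            runtime.GOMAXPROCS(1)
            runtime.LockOSThread()
            factory, _ := libcontainer.New("")
            if err := factory.StartInitialization(); err != nil {
                os.Exit(1)
            }
            panic("init returned unexpectedly")
        }
    }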
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
new file mode 100644
index 0000000..07f4d63
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
@@ -0,0 +1,12 @@
+// +build linux,!gccgo
+
+package nsenter
+
+/*
+#cgo CFLAGS: -Wall
+extern void nsexec();
+void __attribute__((constructor)) init(void) {
+ nsexec();
+}
+*/
+import "C"
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
new file mode 100644
index 0000000..63c7a3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
@@ -0,0 +1,25 @@
+// +build linux,gccgo
+
+package nsenter
+
+/*
+#cgo CFLAGS: -Wall
+extern void nsexec();
+void __attribute__((constructor)) init(void) {
+ nsexec();
+}
+*/
+import "C"
+
+// AlwaysFalse is here to stay false
+// (and be exported so the compiler doesn't optimize out its reference)
+var AlwaysFalse bool
+
+func init() {
+ if AlwaysFalse {
+ // by referencing this C init() in a noop test, it will ensure the compiler
+ // links in the C function.
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
+ C.init()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go
new file mode 100644
index 0000000..ac701ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go
@@ -0,0 +1,5 @@
+// +build !linux !cgo
+
+package nsenter
+
+import "C"
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
new file mode 100644
index 0000000..27e6e53
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
@@ -0,0 +1,260 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <setjmp.h>
+#include <signal.h>
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <linux/limits.h>
+#include <linux/netlink.h>
+#include <linux/types.h>
+#include <bits/sockaddr.h>
+
+/* All arguments should be above stack, because it grows down */
+struct clone_arg {
+ /*
+ * Reserve some space for clone() to locate arguments
+ * and retcode in this place
+ */
+ char stack[4096] __attribute__ ((aligned(16)));
+ char stack_ptr[0];
+ jmp_buf *env;
+};
+
+#define pr_perror(fmt, ...) fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__)
+
+static int child_func(void *_arg)
+{
+ struct clone_arg *arg = (struct clone_arg *)_arg;
+ longjmp(*arg->env, 1);
+}
+
+// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12)
+#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
+#define _GNU_SOURCE
+#include "syscall.h"
+#if defined(__NR_setns) && !defined(SYS_setns)
+#define SYS_setns __NR_setns
+#endif
+#ifdef SYS_setns
+int setns(int fd, int nstype)
+{
+ return syscall(SYS_setns, fd, nstype);
+}
+#endif
+#endif
+
+static int clone_parent(jmp_buf * env) __attribute__ ((noinline));
+static int clone_parent(jmp_buf * env)
+{
+ struct clone_arg ca;
+ int child;
+
+ ca.env = env;
+ child = clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD, &ca);
+
+ return child;
+}
+
+static uint32_t readint32(char *buf)
+{
+ return *(uint32_t *) buf;
+}
+
+// list of known message types we want to send to bootstrap program
+// These are defined in libcontainer/message_linux.go
+#define INIT_MSG 62000
+#define PID_ATTR 27281
+#define CONSOLE_PATH_ATTR 27282
+
+void nsexec()
+{
+ char *namespaces[] = { "ipc", "uts", "net", "pid", "mnt", "user" };
+ const int num = sizeof(namespaces) / sizeof(char *);
+ jmp_buf env;
+ char buf[PATH_MAX], *val;
+ int i, tfd, self_tfd, child, n, len, pipenum, consolefd = -1;
+ pid_t pid = 0;
+
+ // if we don't have INITTYPE or this is the init process, skip the bootstrap process
+ val = getenv("_LIBCONTAINER_INITTYPE");
+ if (val == NULL || strcmp(val, "standard") == 0) {
+ return;
+ }
+ if (strcmp(val, "setns") != 0) {
+ pr_perror("Invalid inittype %s", val);
+ exit(1);
+ }
+
+ val = getenv("_LIBCONTAINER_INITPIPE");
+ if (val == NULL) {
+ pr_perror("Child pipe not found");
+ exit(1);
+ }
+ pipenum = atoi(val);
+ snprintf(buf, sizeof(buf), "%d", pipenum);
+ if (strcmp(val, buf)) {
+ pr_perror("Unable to parse _LIBCONTAINER_INITPIPE");
+ exit(1);
+ }
+
+ char nlbuf[NLMSG_HDRLEN];
+ struct nlmsghdr *nh;
+ if ((n = read(pipenum, nlbuf, NLMSG_HDRLEN)) != NLMSG_HDRLEN) {
+ pr_perror("Failed to read netlink header, got %d", n);
+ exit(1);
+ }
+
+ nh = (struct nlmsghdr *)nlbuf;
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ pr_perror("Invalid netlink header message");
+ exit(1);
+ }
+ if (nh->nlmsg_type != INIT_MSG) {
+ pr_perror("Unexpected netlink message type %d", nh->nlmsg_type);
+ exit(1);
+ }
+ // read the netlink payload
+ len = NLMSG_PAYLOAD(nh, 0);
+ char data[len];
+ if ((n = read(pipenum, data, len)) != len) {
+ pr_perror("Failed to read netlink payload, got %d", n);
+ exit(1);
+ }
+
+ int start = 0;
+ struct nlattr *attr;
+ while (start < len) {
+ int payload_len;
+ attr = (struct nlattr *)((void *)data + start);
+ start += NLA_HDRLEN;
+ payload_len = attr->nla_len - NLA_HDRLEN;
+ switch (attr->nla_type) {
+ case PID_ATTR:
+ pid = (pid_t) readint32(data + start);
+ break;
+ case CONSOLE_PATH_ATTR:
+ consolefd = open((char *)data + start, O_RDWR);
+ if (consolefd < 0) {
+ pr_perror("Failed to open console %s", (char *)data + start);
+ exit(1);
+ }
+ break;
+ }
+ start += NLA_ALIGN(payload_len);
+ }
+
+ // required pid to be passed
+ if (pid == 0) {
+ pr_perror("missing pid");
+ exit(1);
+ }
+
+ /* Check that the specified process exists */
+ snprintf(buf, PATH_MAX - 1, "/proc/%d/ns", pid);
+ tfd = open(buf, O_DIRECTORY | O_RDONLY);
+ if (tfd == -1) {
+ pr_perror("Failed to open \"%s\"", buf);
+ exit(1);
+ }
+
+ self_tfd = open("/proc/self/ns", O_DIRECTORY | O_RDONLY);
+ if (self_tfd == -1) {
+ pr_perror("Failed to open /proc/self/ns");
+ exit(1);
+ }
+
+ for (i = 0; i < num; i++) {
+ struct stat st;
+ struct stat self_st;
+ int fd;
+
+ /* Symlinks on all namespaces exist for dead processes, but they can't be opened */
+ if (fstatat(tfd, namespaces[i], &st, 0) == -1) {
+ // Ignore nonexistent namespaces.
+ if (errno == ENOENT)
+ continue;
+ }
+
+ /* Skip namespaces we're already part of */
+ if (fstatat(self_tfd, namespaces[i], &self_st, 0) != -1 && st.st_ino == self_st.st_ino) {
+ continue;
+ }
+
+ fd = openat(tfd, namespaces[i], O_RDONLY);
+ if (fd == -1) {
+ pr_perror("Failed to open ns file %s for ns %s", buf, namespaces[i]);
+ exit(1);
+ }
+ // Set the namespace.
+ if (setns(fd, 0) == -1) {
+ pr_perror("Failed to setns for %s", namespaces[i]);
+ exit(1);
+ }
+ close(fd);
+ }
+
+ close(self_tfd);
+ close(tfd);
+
+ if (setjmp(env) == 1) {
+ // Child
+
+ if (setsid() == -1) {
+ pr_perror("setsid failed");
+ exit(1);
+ }
+ if (consolefd != -1) {
+ if (ioctl(consolefd, TIOCSCTTY, 0) == -1) {
+ pr_perror("ioctl TIOCSCTTY failed");
+ exit(1);
+ }
+ if (dup3(consolefd, STDIN_FILENO, 0) != STDIN_FILENO) {
+ pr_perror("Failed to dup 0");
+ exit(1);
+ }
+ if (dup3(consolefd, STDOUT_FILENO, 0) != STDOUT_FILENO) {
+ pr_perror("Failed to dup 1");
+ exit(1);
+ }
+ if (dup3(consolefd, STDERR_FILENO, 0) != STDERR_FILENO) {
+ pr_perror("Failed to dup 2");
+ exit(1);
+ }
+ }
+ // Finish executing, let the Go runtime take over.
+ return;
+ }
+ // Parent
+
+ // We must fork to actually enter the PID namespace, use CLONE_PARENT
+ // so the child can have the right parent, and we don't need to forward
+ // the child's exit code or resend its death signal.
+ child = clone_parent(&env);
+ if (child < 0) {
+ pr_perror("Unable to fork");
+ exit(1);
+ }
+
+ len = snprintf(buf, sizeof(buf), "{ \"pid\" : %d }\n", child);
+
+ if (write(pipenum, buf, len) != len) {
+ pr_perror("Unable to send a child pid");
+ kill(child, SIGKILL);
+ exit(1);
+ }
+
+ exit(0);
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/process.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/process.go
new file mode 100644
index 0000000..9661df8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/process.go
@@ -0,0 +1,105 @@
+package libcontainer
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "os"
+)
+
+type processOperations interface {
+ wait() (*os.ProcessState, error)
+ signal(sig os.Signal) error
+ pid() int
+}
+
+// Process specifies the configuration and IO for a process inside
+// a container.
+type Process struct {
+ // The command to be run followed by any arguments.
+ Args []string
+
+ // Env specifies the environment variables for the process.
+ Env []string
+
+ // User will set the uid and gid of the executing process running inside the container
+ // local to the container's user and group configuration.
+ User string
+
+ // Cwd will change the processes current working directory inside the container's rootfs.
+ Cwd string
+
+ // Stdin is a pointer to a reader which provides the standard input stream.
+ Stdin io.Reader
+
+ // Stdout is a pointer to a writer which receives the standard output stream.
+ Stdout io.Writer
+
+ // Stderr is a pointer to a writer which receives the standard error stream.
+ Stderr io.Writer
+
+ // ExtraFiles specifies additional open files to be inherited by the container
+ ExtraFiles []*os.File
+
+ // consolePath is the path to the console allocated to the container.
+ consolePath string
+
+ // Capabilities specify the capabilities to keep when executing the process inside the container
+ // All capabilities not specified will be dropped from the processes capability mask
+ Capabilities []string
+
+ ops processOperations
+}
+
+// Wait waits for the process to exit.
+// Wait releases any resources associated with the Process
+func (p Process) Wait() (*os.ProcessState, error) {
+ if p.ops == nil {
+ return nil, newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted)
+ }
+ return p.ops.wait()
+}
+
+// Pid returns the process ID
+func (p Process) Pid() (int, error) {
+ // math.MinInt32 is returned here, because it's an invalid value
+ // for the kill() system call.
+ if p.ops == nil {
+ return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted)
+ }
+ return p.ops.pid(), nil
+}
+
+// Signal sends a signal to the Process.
+func (p Process) Signal(sig os.Signal) error {
+ if p.ops == nil {
+ return newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted)
+ }
+ return p.ops.signal(sig)
+}
+
+// IO holds the process's STDIO
+type IO struct {
+ Stdin io.WriteCloser
+ Stdout io.ReadCloser
+ Stderr io.ReadCloser
+}
+
+// NewConsole creates new console for process and returns it
+func (p *Process) NewConsole(rootuid int) (Console, error) {
+ console, err := NewConsole(rootuid, rootuid)
+ if err != nil {
+ return nil, err
+ }
+ p.consolePath = console.Path()
+ return console, nil
+}
+
+// ConsoleFromPath sets the process's console with the path provided
+func (p *Process) ConsoleFromPath(path string) error {
+ if p.consolePath != "" {
+ return newGenericError(fmt.Errorf("console path already exists for process"), ConsoleExists)
+ }
+ p.consolePath = path
+ return nil
+}
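
A sketch of populating a Process before handing it to a container's start path; all values are illustrative:

    process := &libcontainer.Process{
        Args:   []string{"/bin/sh", "-c", "echo hello"},
        Env:    []string{"PATH=/usr/bin:/bin"},
        User:   "daemon",
        Cwd:    "/",
        Stdin:  os.Stdin,
        Stdout: os.Stdout,
        Stderr: os.Stderr,
    }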
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/process_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/process_linux.go
new file mode 100644
index 0000000..ac457c2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/process_linux.go
@@ -0,0 +1,405 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "syscall"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/runc/libcontainer/utils"
+)
+
+type parentProcess interface {
+ // pid returns the pid for the running process.
+ pid() int
+
+ // start starts the process execution.
+ start() error
+
+ // send a SIGKILL to the process and wait for the exit.
+ terminate() error
+
+ // wait waits on the process returning the process state.
+ wait() (*os.ProcessState, error)
+
+ // startTime returns the process start time.
+ startTime() (string, error)
+
+ signal(os.Signal) error
+
+ externalDescriptors() []string
+
+ setExternalDescriptors(fds []string)
+}
+
+type setnsProcess struct {
+ cmd *exec.Cmd
+ parentPipe *os.File
+ childPipe *os.File
+ cgroupPaths map[string]string
+ config *initConfig
+ fds []string
+ process *Process
+ bootstrapData io.Reader
+}
+
+func (p *setnsProcess) startTime() (string, error) {
+ return system.GetProcessStartTime(p.pid())
+}
+
+func (p *setnsProcess) signal(sig os.Signal) error {
+ s, ok := sig.(syscall.Signal)
+ if !ok {
+ return errors.New("os: unsupported signal type")
+ }
+ return syscall.Kill(p.pid(), s)
+}
+
+func (p *setnsProcess) start() (err error) {
+ defer p.parentPipe.Close()
+ err = p.cmd.Start()
+ p.childPipe.Close()
+ if err != nil {
+ return newSystemError(err)
+ }
+ if p.bootstrapData != nil {
+ if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
+ return newSystemError(err)
+ }
+ }
+ if err = p.execSetns(); err != nil {
+ return newSystemError(err)
+ }
+ if len(p.cgroupPaths) > 0 {
+ if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
+ return newSystemError(err)
+ }
+ }
+ if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
+ return newSystemError(err)
+ }
+
+ if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
+ return newSystemError(err)
+ }
+ // wait for the child process to fully complete and receive an error message
+ // if one was encountered
+ var ierr *genericError
+ if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
+ return newSystemError(err)
+ }
+ // Must be done after Shutdown so the child will exit and we can wait for it.
+ if ierr != nil {
+ p.wait()
+ return newSystemError(ierr)
+ }
+ return nil
+}
+
+// execSetns runs the process that executes the C code performing the setns calls.
+// Because setns support requires the C process to fork off a child and perform the setns
+// before the Go runtime boots, we wait for the process to exit and receive the child's pid
+// over the provided pipe.
+func (p *setnsProcess) execSetns() error {
+ status, err := p.cmd.Process.Wait()
+ if err != nil {
+ p.cmd.Wait()
+ return newSystemError(err)
+ }
+ if !status.Success() {
+ p.cmd.Wait()
+ return newSystemError(&exec.ExitError{ProcessState: status})
+ }
+ var pid *pid
+ if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
+ p.cmd.Wait()
+ return newSystemError(err)
+ }
+ process, err := os.FindProcess(pid.Pid)
+ if err != nil {
+ return err
+ }
+ p.cmd.Process = process
+ p.process.ops = p
+ return nil
+}
+
+// terminate sends a SIGKILL to the forked process for the setns routine then waits to
+// avoid the process becoming a zombie.
+func (p *setnsProcess) terminate() error {
+ if p.cmd.Process == nil {
+ return nil
+ }
+ err := p.cmd.Process.Kill()
+ if _, werr := p.wait(); err == nil {
+ err = werr
+ }
+ return err
+}
+
+func (p *setnsProcess) wait() (*os.ProcessState, error) {
+ err := p.cmd.Wait()
+
+ // Return actual ProcessState even on Wait error
+ return p.cmd.ProcessState, err
+}
+
+func (p *setnsProcess) pid() int {
+ return p.cmd.Process.Pid
+}
+
+func (p *setnsProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+func (p *setnsProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
+
+type initProcess struct {
+ cmd *exec.Cmd
+ parentPipe *os.File
+ childPipe *os.File
+ config *initConfig
+ manager cgroups.Manager
+ container *linuxContainer
+ fds []string
+ process *Process
+}
+
+func (p *initProcess) pid() int {
+ return p.cmd.Process.Pid
+}
+
+func (p *initProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+func (p *initProcess) start() (err error) {
+ defer p.parentPipe.Close()
+ err = p.cmd.Start()
+ p.process.ops = p
+ p.childPipe.Close()
+ if err != nil {
+ p.process.ops = nil
+ return newSystemError(err)
+ }
+ // Save the standard descriptor names before the container process
+ // can potentially move them (e.g., via dup2()). If we don't do this now,
+ // we won't know at checkpoint time which file descriptor to look up.
+ fds, err := getPipeFds(p.pid())
+ if err != nil {
+ return newSystemError(err)
+ }
+ p.setExternalDescriptors(fds)
+ // Do this before syncing with child so that no children
+ // can escape the cgroup
+ if err := p.manager.Apply(p.pid()); err != nil {
+ return newSystemError(err)
+ }
+ defer func() {
+ if err != nil {
+ // TODO: should not be the responsibility to call here
+ p.manager.Destroy()
+ }
+ }()
+ if p.config.Config.Hooks != nil {
+ s := configs.HookState{
+ Version: p.container.config.Version,
+ ID: p.container.id,
+ Pid: p.pid(),
+ Root: p.config.Config.Rootfs,
+ }
+ for _, hook := range p.config.Config.Hooks.Prestart {
+ if err := hook.Run(s); err != nil {
+ return newSystemError(err)
+ }
+ }
+ }
+ if err := p.createNetworkInterfaces(); err != nil {
+ return newSystemError(err)
+ }
+ if err := p.sendConfig(); err != nil {
+ return newSystemError(err)
+ }
+ var (
+ procSync syncT
+ sentRun bool
+ ierr *genericError
+ )
+
+loop:
+ for {
+ if err := json.NewDecoder(p.parentPipe).Decode(&procSync); err != nil {
+ if err == io.EOF {
+ break loop
+ }
+ return newSystemError(err)
+ }
+ switch procSync.Type {
+ case procStart:
+ break loop
+ case procReady:
+ if err := p.manager.Set(p.config.Config); err != nil {
+ return newSystemError(err)
+ }
+ // Sync with child.
+ if err := utils.WriteJSON(p.parentPipe, syncT{procRun}); err != nil {
+ return newSystemError(err)
+ }
+ sentRun = true
+ case procError:
+ // wait for the child process to fully complete and receive an error message
+ // if one was encountered
+ if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
+ return newSystemError(err)
+ }
+ if ierr != nil {
+ break loop
+ }
+ // Programmer error.
+ panic("No error following JSON procError payload.")
+ default:
+ return newSystemError(fmt.Errorf("invalid JSON synchronisation payload from child"))
+ }
+ }
+ if !sentRun {
+ return newSystemError(fmt.Errorf("could not synchronise with container process"))
+ }
+ if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
+ return newSystemError(err)
+ }
+ // Must be done after Shutdown so the child will exit and we can wait for it.
+ if ierr != nil {
+ p.wait()
+ return newSystemError(ierr)
+ }
+ return nil
+}
+
+func (p *initProcess) wait() (*os.ProcessState, error) {
+ err := p.cmd.Wait()
+ if err != nil {
+ return p.cmd.ProcessState, err
+ }
+ // we should kill all processes in the cgroup when init dies if we use the host PID namespace
+ if p.cmd.SysProcAttr.Cloneflags&syscall.CLONE_NEWPID == 0 {
+ killCgroupProcesses(p.manager)
+ }
+ return p.cmd.ProcessState, nil
+}
+
+func (p *initProcess) terminate() error {
+ if p.cmd.Process == nil {
+ return nil
+ }
+ err := p.cmd.Process.Kill()
+ if _, werr := p.wait(); err == nil {
+ err = werr
+ }
+ return err
+}
+
+func (p *initProcess) startTime() (string, error) {
+ return system.GetProcessStartTime(p.pid())
+}
+
+func (p *initProcess) sendConfig() error {
+ // send the config to the container's init process
+ return utils.WriteJSON(p.parentPipe, p.config)
+}
+
+func (p *initProcess) createNetworkInterfaces() error {
+ for _, config := range p.config.Config.Networks {
+ strategy, err := getStrategy(config.Type)
+ if err != nil {
+ return err
+ }
+ n := &network{
+ Network: *config,
+ }
+ if err := strategy.create(n, p.pid()); err != nil {
+ return err
+ }
+ p.config.Networks = append(p.config.Networks, n)
+ }
+ return nil
+}
+
+func (p *initProcess) signal(sig os.Signal) error {
+ s, ok := sig.(syscall.Signal)
+ if !ok {
+ return errors.New("os: unsupported signal type")
+ }
+ return syscall.Kill(p.pid(), s)
+}
+
+func (p *initProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
+
+func getPipeFds(pid int) ([]string, error) {
+ fds := make([]string, 3)
+
+ dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd")
+ for i := 0; i < 3; i++ {
+ f := filepath.Join(dirPath, strconv.Itoa(i))
+ target, err := os.Readlink(f)
+ if err != nil {
+ return fds, err
+ }
+ fds[i] = target
+ }
+ return fds, nil
+}
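+
+// Example (editor's sketch, not part of upstream runc): getPipeFds just
+// resolves the /proc/<pid>/fd/{0,1,2} symlinks, so it can be exercised
+// against the calling process itself, assuming /proc is mounted:
+//
+//     fds, err := getPipeFds(os.Getpid())
+//     if err == nil {
+//         fmt.Println(fds) // e.g. [/dev/pts/0 /dev/pts/0 /dev/pts/0]
+//     }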
+
+// InitializeIO creates pipes for use with the process's stdio and
+// returns the opposite side of each pipe for the caller.
+func (p *Process) InitializeIO(rootuid int) (i *IO, err error) {
+ var fds []uintptr
+ i = &IO{}
+ // cleanup in case of an error
+ defer func() {
+ if err != nil {
+ for _, fd := range fds {
+ syscall.Close(int(fd))
+ }
+ }
+ }()
+ // STDIN
+ r, w, err := os.Pipe()
+ if err != nil {
+ return nil, err
+ }
+ fds = append(fds, r.Fd(), w.Fd())
+ p.Stdin, i.Stdin = r, w
+ // STDOUT
+ if r, w, err = os.Pipe(); err != nil {
+ return nil, err
+ }
+ fds = append(fds, r.Fd(), w.Fd())
+ p.Stdout, i.Stdout = w, r
+ // STDERR
+ if r, w, err = os.Pipe(); err != nil {
+ return nil, err
+ }
+ fds = append(fds, r.Fd(), w.Fd())
+ p.Stderr, i.Stderr = w, r
+ // change ownership of the pipes in case we are in a user namespace
+ for _, fd := range fds {
+ if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil {
+ return nil, err
+ }
+ }
+ return i, nil
+}
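+
+// Usage sketch (editor's note; the caller shown here is hypothetical): the
+// parent keeps the returned *IO side and passes the Process side to the
+// container, e.g.:
+//
+//     p := &Process{Args: []string{"/bin/sh"}}
+//     procIO, err := p.InitializeIO(0) // rootuid 0: no user namespace remapping
+//     if err == nil {
+//         procIO.Stdin.Write([]byte("echo hello\n")) // write to the child's stdin
+//     }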
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/restored_process.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/restored_process.go
new file mode 100644
index 0000000..a96f4ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/restored_process.go
@@ -0,0 +1,122 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+func newRestoredProcess(pid int, fds []string) (*restoredProcess, error) {
+ var (
+ err error
+ )
+ proc, err := os.FindProcess(pid)
+ if err != nil {
+ return nil, err
+ }
+ started, err := system.GetProcessStartTime(pid)
+ if err != nil {
+ return nil, err
+ }
+ return &restoredProcess{
+ proc: proc,
+ processStartTime: started,
+ fds: fds,
+ }, nil
+}
+
+type restoredProcess struct {
+ proc *os.Process
+ processStartTime string
+ fds []string
+}
+
+func (p *restoredProcess) start() error {
+ return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError)
+}
+
+func (p *restoredProcess) pid() int {
+ return p.proc.Pid
+}
+
+func (p *restoredProcess) terminate() error {
+ err := p.proc.Kill()
+ if _, werr := p.wait(); err == nil {
+ err = werr
+ }
+ return err
+}
+
+func (p *restoredProcess) wait() (*os.ProcessState, error) {
+ // TODO: how do we wait on the actual process?
+ // maybe use --exec-cmd in criu
+ st, err := p.proc.Wait()
+ if err != nil {
+ return nil, err
+ }
+ return st, nil
+}
+
+func (p *restoredProcess) startTime() (string, error) {
+ return p.processStartTime, nil
+}
+
+func (p *restoredProcess) signal(s os.Signal) error {
+ return p.proc.Signal(s)
+}
+
+func (p *restoredProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+func (p *restoredProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
+
+// nonChildProcess represents a process where the calling process is not
+// the parent process. This process is created when a factory loads a container from
+// a persisted state.
+type nonChildProcess struct {
+ processPid int
+ processStartTime string
+ fds []string
+}
+
+func (p *nonChildProcess) start() error {
+ return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError)
+}
+
+func (p *nonChildProcess) pid() int {
+ return p.processPid
+}
+
+func (p *nonChildProcess) terminate() error {
+ return newGenericError(fmt.Errorf("restored process cannot be terminated"), SystemError)
+}
+
+func (p *nonChildProcess) wait() (*os.ProcessState, error) {
+ return nil, newGenericError(fmt.Errorf("restored process cannot be waited on"), SystemError)
+}
+
+func (p *nonChildProcess) startTime() (string, error) {
+ return p.processStartTime, nil
+}
+
+func (p *nonChildProcess) signal(s os.Signal) error {
+ proc, err := os.FindProcess(p.processPid)
+ if err != nil {
+ return err
+ }
+ return proc.Signal(s)
+}
+
+func (p *nonChildProcess) externalDescriptors() []string {
+ return p.fds
+}
+
+func (p *nonChildProcess) setExternalDescriptors(newFds []string) {
+ p.fds = newFds
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/rootfs_linux.go
new file mode 100644
index 0000000..aa061ab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/rootfs_linux.go
@@ -0,0 +1,689 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/symlink"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/label"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
+
+// setupRootfs sets up the devices, mount points, and filesystems for use inside a
+// new mount namespace.
+func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
+ if err := prepareRoot(config); err != nil {
+ return newSystemError(err)
+ }
+
+ setupDev := len(config.Devices) != 0
+ for _, m := range config.Mounts {
+ for _, precmd := range m.PremountCmds {
+ if err := mountCmd(precmd); err != nil {
+ return newSystemError(err)
+ }
+ }
+ if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil {
+ return newSystemError(err)
+ }
+
+ for _, postcmd := range m.PostmountCmds {
+ if err := mountCmd(postcmd); err != nil {
+ return newSystemError(err)
+ }
+ }
+ }
+ if setupDev {
+ if err := createDevices(config); err != nil {
+ return newSystemError(err)
+ }
+ if err := setupPtmx(config, console); err != nil {
+ return newSystemError(err)
+ }
+ if err := setupDevSymlinks(config.Rootfs); err != nil {
+ return newSystemError(err)
+ }
+ }
+ if err := syscall.Chdir(config.Rootfs); err != nil {
+ return newSystemError(err)
+ }
+ if config.NoPivotRoot {
+ err = msMoveRoot(config.Rootfs)
+ } else {
+ err = pivotRoot(config.Rootfs, config.PivotDir)
+ }
+ if err != nil {
+ return newSystemError(err)
+ }
+ if setupDev {
+ if err := reOpenDevNull(); err != nil {
+ return newSystemError(err)
+ }
+ }
+ if config.Readonlyfs {
+ if err := setReadonly(); err != nil {
+ return newSystemError(err)
+ }
+ }
+ syscall.Umask(0022)
+ return nil
+}
+
+func mountCmd(cmd configs.Command) error {
+ command := exec.Command(cmd.Path, cmd.Args[:]...)
+ command.Env = cmd.Env
+ command.Dir = cmd.Dir
+ if out, err := command.CombinedOutput(); err != nil {
+ return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err)
+ }
+ return nil
+}
+
+func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
+ var (
+ dest = m.Destination
+ )
+ if !strings.HasPrefix(dest, rootfs) {
+ dest = filepath.Join(rootfs, dest)
+ }
+
+ switch m.Device {
+ case "proc", "sysfs":
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ // Selinux kernels do not support labeling of /proc or /sys
+ return mountPropagate(m, rootfs, "")
+ case "mqueue":
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ if err := mountPropagate(m, rootfs, mountLabel); err != nil {
+ // older kernels do not support labeling of /dev/mqueue
+ if err := mountPropagate(m, rootfs, ""); err != nil {
+ return err
+ }
+ }
+ return label.SetFileLabel(dest, mountLabel)
+ case "tmpfs":
+ stat, err := os.Stat(dest)
+ if err != nil {
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ }
+ if err := mountPropagate(m, rootfs, mountLabel); err != nil {
+ return err
+ }
+ if stat != nil {
+ if err = os.Chmod(dest, stat.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+ case "devpts":
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ return mountPropagate(m, rootfs, mountLabel)
+ case "securityfs":
+ if err := os.MkdirAll(dest, 0755); err != nil {
+ return err
+ }
+ return mountPropagate(m, rootfs, mountLabel)
+ case "bind":
+ stat, err := os.Stat(m.Source)
+ if err != nil {
+ // error out if the source of a bind mount does not exist as we will be
+ // unable to bind anything to it.
+ return err
+ }
+ // ensure that the destination of the bind mount has its symlinks resolved
+ // at mount time, because any previous mounts can invalidate the next mount's
+ // destination. this can happen when a user specifies mounts within other
+ // mounts in an attempt to break out of, or otherwise escape, the container's
+ // rootfs.
+ if dest, err = symlink.FollowSymlinkInScope(filepath.Join(rootfs, m.Destination), rootfs); err != nil {
+ return err
+ }
+ if err := checkMountDestination(rootfs, dest); err != nil {
+ return err
+ }
+ // update the mount with the correct dest after symlinks are resolved.
+ m.Destination = dest
+ if err := createIfNotExists(dest, stat.IsDir()); err != nil {
+ return err
+ }
+ if err := mountPropagate(m, rootfs, mountLabel); err != nil {
+ return err
+ }
+ // a bind mount won't change the mount options; we need a remount to make the
+ // mount options effective. first check that we have non-default options
+ // before attempting a remount
+ if m.Flags&^(syscall.MS_REC|syscall.MS_REMOUNT|syscall.MS_BIND) != 0 {
+ // only remount if unique mount options are set
+ if err := remount(m, rootfs); err != nil {
+ return err
+ }
+ }
+
+ if m.Relabel != "" {
+ if err := label.Validate(m.Relabel); err != nil {
+ return err
+ }
+ shared := label.IsShared(m.Relabel)
+ if err := label.Relabel(m.Source, mountLabel, shared); err != nil {
+ return err
+ }
+ }
+ case "cgroup":
+ binds, err := getCgroupMounts(m)
+ if err != nil {
+ return err
+ }
+ var merged []string
+ for _, b := range binds {
+ ss := filepath.Base(b.Destination)
+ if strings.Contains(ss, ",") {
+ merged = append(merged, ss)
+ }
+ }
+ tmpfs := &configs.Mount{
+ Source: "tmpfs",
+ Device: "tmpfs",
+ Destination: m.Destination,
+ Flags: defaultMountFlags,
+ Data: "mode=755",
+ PropagationFlags: m.PropagationFlags,
+ }
+ if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil {
+ return err
+ }
+ for _, b := range binds {
+ if err := mountToRootfs(b, rootfs, mountLabel); err != nil {
+ return err
+ }
+ }
+ // create symlinks for merged cgroups
+ cwd, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+ if err := os.Chdir(filepath.Join(rootfs, m.Destination)); err != nil {
+ return err
+ }
+ for _, mc := range merged {
+ for _, ss := range strings.Split(mc, ",") {
+ if err := os.Symlink(mc, ss); err != nil {
+ // if the cgroup already exists, that's okay (it could have been created before)
+ if os.IsExist(err) {
+ continue
+ }
+ os.Chdir(cwd)
+ return err
+ }
+ }
+ }
+ if err := os.Chdir(cwd); err != nil {
+ return err
+ }
+ if m.Flags&syscall.MS_RDONLY != 0 {
+ // remount cgroup root as readonly
+ mcgrouproot := &configs.Mount{
+ Destination: m.Destination,
+ Flags: defaultMountFlags | syscall.MS_RDONLY,
+ }
+ if err := remount(mcgrouproot, rootfs); err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("unknown mount device %q to %q", m.Device, m.Destination)
+ }
+ return nil
+}
+
+func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) {
+ mounts, err := cgroups.GetCgroupMounts()
+ if err != nil {
+ return nil, err
+ }
+
+ cgroupPaths, err := cgroups.ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return nil, err
+ }
+
+ var binds []*configs.Mount
+
+ for _, mm := range mounts {
+ dir, err := mm.GetThisCgroupDir(cgroupPaths)
+ if err != nil {
+ return nil, err
+ }
+ relDir, err := filepath.Rel(mm.Root, dir)
+ if err != nil {
+ return nil, err
+ }
+ binds = append(binds, &configs.Mount{
+ Device: "bind",
+ Source: filepath.Join(mm.Mountpoint, relDir),
+ Destination: filepath.Join(m.Destination, strings.Join(mm.Subsystems, ",")),
+ Flags: syscall.MS_BIND | syscall.MS_REC | m.Flags,
+ PropagationFlags: m.PropagationFlags,
+ })
+ }
+
+ return binds, nil
+}
+
+// checkMountDestination checks to ensure that the mount destination is not over the top of /proc.
+// dest is required to be an absolute path with any symlinks resolved before calling this function.
+func checkMountDestination(rootfs, dest string) error {
+ if filepath.Clean(rootfs) == filepath.Clean(dest) {
+ return fmt.Errorf("mounting into / is prohibited")
+ }
+ invalidDestinations := []string{
+ "/proc",
+ }
+ // Whitelist: these should be subdirectories of the invalid destinations
+ validDestinations := []string{
+ // These entries can be bind mounted from files emulated by fuse,
+ // so commands like top and free display stats for the container.
+ "/proc/cpuinfo",
+ "/proc/diskstats",
+ "/proc/meminfo",
+ "/proc/stats",
+ }
+ for _, valid := range validDestinations {
+ path, err := filepath.Rel(filepath.Join(rootfs, valid), dest)
+ if err != nil {
+ return err
+ }
+ if path == "." {
+ return nil
+ }
+ }
+ for _, invalid := range invalidDestinations {
+ path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest)
+ if err != nil {
+ return err
+ }
+ if path == "." || !strings.HasPrefix(path, "..") {
+ return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid)
+ }
+ }
+ return nil
+}
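+
+// For illustration (editor's note), with a rootfs of "/r" the checks resolve
+// as follows:
+//
+//     checkMountDestination("/r", "/r/proc")            // error: masks /proc
+//     checkMountDestination("/r", "/r/proc/cpuinfo")    // nil: whitelisted
+//     checkMountDestination("/r", "/r/etc/resolv.conf") // nil: outside /proc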
+
+func setupDevSymlinks(rootfs string) error {
+ var links = [][2]string{
+ {"/proc/self/fd", "/dev/fd"},
+ {"/proc/self/fd/0", "/dev/stdin"},
+ {"/proc/self/fd/1", "/dev/stdout"},
+ {"/proc/self/fd/2", "/dev/stderr"},
+ }
+ // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink
+ // in /dev if it exists in /proc.
+ if _, err := os.Stat("/proc/kcore"); err == nil {
+ links = append(links, [2]string{"/proc/kcore", "/dev/kcore"})
+ }
+ for _, link := range links {
+ var (
+ src = link[0]
+ dst = filepath.Join(rootfs, link[1])
+ )
+ if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) {
+ return fmt.Errorf("symlink %s %s %s", src, dst, err)
+ }
+ }
+ return nil
+}
+
+// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs
+// this method will make them point to `/dev/null` in this container's rootfs. This
+// needs to be called after we chroot/pivot into the container's rootfs so that any
+// symlinks are resolved locally.
+func reOpenDevNull() error {
+ var stat, devNullStat syscall.Stat_t
+ file, err := os.OpenFile("/dev/null", os.O_RDWR, 0)
+ if err != nil {
+ return fmt.Errorf("Failed to open /dev/null - %s", err)
+ }
+ defer file.Close()
+ if err := syscall.Fstat(int(file.Fd()), &devNullStat); err != nil {
+ return err
+ }
+ for fd := 0; fd < 3; fd++ {
+ if err := syscall.Fstat(fd, &stat); err != nil {
+ return err
+ }
+ if stat.Rdev == devNullStat.Rdev {
+ // Close and re-open the fd.
+ if err := syscall.Dup3(int(file.Fd()), fd, 0); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Create the device nodes in the container.
+func createDevices(config *configs.Config) error {
+ useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER)
+ oldMask := syscall.Umask(0000)
+ for _, node := range config.Devices {
+ // containers running in a user namespace are not allowed to mknod
+ // devices, so we just bind mount them from the host.
+ if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil {
+ syscall.Umask(oldMask)
+ return err
+ }
+ }
+ syscall.Umask(oldMask)
+ return nil
+}
+
+func bindMountDeviceNode(dest string, node *configs.Device) error {
+ f, err := os.Create(dest)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+ if f != nil {
+ f.Close()
+ }
+ return syscall.Mount(node.Path, dest, "bind", syscall.MS_BIND, "")
+}
+
+// Creates the device node in the rootfs of the container.
+func createDeviceNode(rootfs string, node *configs.Device, bind bool) error {
+ dest := filepath.Join(rootfs, node.Path)
+ if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
+ return err
+ }
+
+ if bind {
+ return bindMountDeviceNode(dest, node)
+ }
+ if err := mknodDevice(dest, node); err != nil {
+ if os.IsExist(err) {
+ return nil
+ } else if os.IsPermission(err) {
+ return bindMountDeviceNode(dest, node)
+ }
+ return err
+ }
+ return nil
+}
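+
+// Sketch (editor's illustration; the values are assumptions, not taken from a
+// real config): a character device such as /dev/null would be described and
+// created like this:
+//
+//     null := &configs.Device{
+//         Path: "/dev/null", Type: 'c', Major: 1, Minor: 3,
+//         FileMode: 0666, Uid: 0, Gid: 0,
+//     }
+//     err := createDeviceNode("/path/to/rootfs", null, false) // mknod, not bind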
+
+func mknodDevice(dest string, node *configs.Device) error {
+ fileMode := node.FileMode
+ switch node.Type {
+ case 'c':
+ fileMode |= syscall.S_IFCHR
+ case 'b':
+ fileMode |= syscall.S_IFBLK
+ default:
+ return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
+ }
+ if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil {
+ return err
+ }
+ return syscall.Chown(dest, int(node.Uid), int(node.Gid))
+}
+
+func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info {
+ for _, m := range mountinfo {
+ if m.Mountpoint == dir {
+ return m
+ }
+ }
+ return nil
+}
+
+// Get the parent mount point of the directory passed in as an argument. Also
+// return the optional fields.
+func getParentMount(rootfs string) (string, string, error) {
+ var path string
+
+ mountinfos, err := mount.GetMounts()
+ if err != nil {
+ return "", "", err
+ }
+
+ mountinfo := getMountInfo(mountinfos, rootfs)
+ if mountinfo != nil {
+ return rootfs, mountinfo.Optional, nil
+ }
+
+ path = rootfs
+ for {
+ path = filepath.Dir(path)
+
+ mountinfo = getMountInfo(mountinfos, path)
+ if mountinfo != nil {
+ return path, mountinfo.Optional, nil
+ }
+
+ if path == "/" {
+ break
+ }
+ }
+
+ // If we get here, we did not find a parent mount. Something is wrong.
+ return "", "", fmt.Errorf("Could not find parent mount of %s", rootfs)
+}
+
+// Make parent mount private if it was shared
+func rootfsParentMountPrivate(config *configs.Config) error {
+ sharedMount := false
+
+ parentMount, optionalOpts, err := getParentMount(config.Rootfs)
+ if err != nil {
+ return err
+ }
+
+ optsSplit := strings.Split(optionalOpts, " ")
+ for _, opt := range optsSplit {
+ if strings.HasPrefix(opt, "shared:") {
+ sharedMount = true
+ break
+ }
+ }
+
+ // Make the parent mount PRIVATE if it was shared. This is needed for two
+ // reasons. First, pivot_root() will fail if the parent mount is shared.
+ // Second, when we bind mount the rootfs it would otherwise propagate to the
+ // parent namespace, and we don't want that to happen.
+ if sharedMount {
+ return syscall.Mount("", parentMount, "", syscall.MS_PRIVATE, "")
+ }
+
+ return nil
+}
+
+func prepareRoot(config *configs.Config) error {
+ flag := syscall.MS_SLAVE | syscall.MS_REC
+ if config.RootPropagation != 0 {
+ flag = config.RootPropagation
+ }
+ if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil {
+ return err
+ }
+
+ if err := rootfsParentMountPrivate(config); err != nil {
+ return err
+ }
+
+ return syscall.Mount(config.Rootfs, config.Rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, "")
+}
+
+func setReadonly() error {
+ return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "")
+}
+
+func setupPtmx(config *configs.Config, console *linuxConsole) error {
+ ptmx := filepath.Join(config.Rootfs, "dev/ptmx")
+ if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err := os.Symlink("pts/ptmx", ptmx); err != nil {
+ return fmt.Errorf("symlink dev ptmx %s", err)
+ }
+ if console != nil {
+ return console.mount(config.Rootfs, config.MountLabel)
+ }
+ return nil
+}
+
+func pivotRoot(rootfs, pivotBaseDir string) error {
+ if pivotBaseDir == "" {
+ pivotBaseDir = "/"
+ }
+ tmpDir := filepath.Join(rootfs, pivotBaseDir)
+ if err := os.MkdirAll(tmpDir, 0755); err != nil {
+ return fmt.Errorf("can't create tmp dir %s, error %v", tmpDir, err)
+ }
+ pivotDir, err := ioutil.TempDir(tmpDir, ".pivot_root")
+ if err != nil {
+ return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err)
+ }
+ if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
+ return fmt.Errorf("pivot_root %s", err)
+ }
+ if err := syscall.Chdir("/"); err != nil {
+ return fmt.Errorf("chdir / %s", err)
+ }
+ // the path to the pivot dir has now changed; update it
+ pivotDir = filepath.Join(pivotBaseDir, filepath.Base(pivotDir))
+
+ // Make pivotDir rprivate to make sure none of the unmounts
+ // propagate to the parent.
+ if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
+ return err
+ }
+
+ if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
+ return fmt.Errorf("unmount pivot_root dir %s", err)
+ }
+ return os.Remove(pivotDir)
+}
+
+func msMoveRoot(rootfs string) error {
+ if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
+ return err
+ }
+ if err := syscall.Chroot("."); err != nil {
+ return err
+ }
+ return syscall.Chdir("/")
+}
+
+// createIfNotExists creates a file or a directory only if it does not already exist.
+func createIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
+
+// remountReadonly will bind over the top of an existing path and ensure that it is read-only.
+func remountReadonly(path string) error {
+ for i := 0; i < 5; i++ {
+ if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) {
+ switch err {
+ case syscall.EINVAL:
+ // Probably not a mountpoint, use bind-mount
+ if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil {
+ return err
+ }
+ return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "")
+ case syscall.EBUSY:
+ time.Sleep(100 * time.Millisecond)
+ continue
+ default:
+ return err
+ }
+ }
+ return nil
+ }
+ return fmt.Errorf("unable to mount %s as readonly max retries reached", path)
+}
+
+// maskFile bind mounts /dev/null over the top of the specified path inside a container
+// to avoid security issues from processes reading information from non-namespace-aware
+// mounts (e.g. /proc/kcore).
+func maskFile(path string) error {
+ if err := syscall.Mount("/dev/null", path, "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return nil
+}
+
+// writeSystemProperty writes the value to a path under /proc/sys as determined from the key.
+// For example, net.ipv4.ip_forward is translated to /proc/sys/net/ipv4/ip_forward.
+func writeSystemProperty(key, value string) error {
+ keyPath := strings.Replace(key, ".", "/", -1)
+ return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644)
+}
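+
+// For example (editor's note):
+//
+//     err := writeSystemProperty("net.ipv4.ip_forward", "1")
+//     // writes "1" to /proc/sys/net/ipv4/ip_forward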
+
+func remount(m *configs.Mount, rootfs string) error {
+ var (
+ dest = m.Destination
+ )
+ if !strings.HasPrefix(dest, rootfs) {
+ dest = filepath.Join(rootfs, dest)
+ }
+ if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags|syscall.MS_REMOUNT), ""); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Do the mount operation followed by additional mounts required to take care
+// of propagation flags.
+func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error {
+ var (
+ dest = m.Destination
+ data = label.FormatMountLabel(m.Data, mountLabel)
+ )
+ if !strings.HasPrefix(dest, rootfs) {
+ dest = filepath.Join(rootfs, dest)
+ }
+
+ if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), data); err != nil {
+ return err
+ }
+
+ for _, pflag := range m.PropagationFlags {
+ if err := syscall.Mount("", dest, "", uintptr(pflag), ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/config.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/config.go
new file mode 100644
index 0000000..3b9a759
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/config.go
@@ -0,0 +1,71 @@
+package seccomp
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var operators = map[string]configs.Operator{
+ "SCMP_CMP_NE": configs.NotEqualTo,
+ "SCMP_CMP_LT": configs.LessThan,
+ "SCMP_CMP_LE": configs.LessThanOrEqualTo,
+ "SCMP_CMP_EQ": configs.EqualTo,
+ "SCMP_CMP_GE": configs.GreaterThanOrEqualTo,
+ "SCMP_CMP_GT": configs.GreaterThan,
+ "SCMP_CMP_MASKED_EQ": configs.MaskEqualTo,
+}
+
+var actions = map[string]configs.Action{
+ "SCMP_ACT_KILL": configs.Kill,
+ "SCMP_ACT_ERRNO": configs.Errno,
+ "SCMP_ACT_TRAP": configs.Trap,
+ "SCMP_ACT_ALLOW": configs.Allow,
+ "SCMP_ACT_TRACE": configs.Trace,
+}
+
+var archs = map[string]string{
+ "SCMP_ARCH_X86": "x86",
+ "SCMP_ARCH_X86_64": "amd64",
+ "SCMP_ARCH_X32": "x32",
+ "SCMP_ARCH_ARM": "arm",
+ "SCMP_ARCH_AARCH64": "arm64",
+ "SCMP_ARCH_MIPS": "mips",
+ "SCMP_ARCH_MIPS64": "mips64",
+ "SCMP_ARCH_MIPS64N32": "mips64n32",
+ "SCMP_ARCH_MIPSEL": "mipsel",
+ "SCMP_ARCH_MIPSEL64": "mipsel64",
+ "SCMP_ARCH_MIPSEL64N32": "mipsel64n32",
+}
+
+// ConvertStringToOperator converts a string into a Seccomp comparison operator.
+// Comparison operators use the names they are assigned by Libseccomp's header.
+// Attempting to convert a string that is not a valid operator results in an
+// error.
+func ConvertStringToOperator(in string) (configs.Operator, error) {
+ if op, ok := operators[in]; ok {
+ return op, nil
+ }
+ return 0, fmt.Errorf("string %s is not a valid operator for seccomp", in)
+}
+
+// ConvertStringToAction converts a string into a Seccomp rule match action.
+// Actions use the names they are assigned in Libseccomp's header, though some
+// (notably, SCMP_ACT_TRACE) are not available in this implementation and will
+// return errors.
+// Attempting to convert a string that is not a valid action results in an
+// error.
+func ConvertStringToAction(in string) (configs.Action, error) {
+ if act, ok := actions[in]; ok {
+ return act, nil
+ }
+ return 0, fmt.Errorf("string %s is not a valid action for seccomp", in)
+}
+
+// ConvertStringToArch converts a string into a Seccomp comparison arch.
+func ConvertStringToArch(in string) (string, error) {
+ if arch, ok := archs[in]; ok {
+ return arch, nil
+ }
+ return "", fmt.Errorf("string %s is not a valid arch for seccomp", in)
+}
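+
+// Usage sketch (editor's note): these helpers map seccomp profile strings onto
+// libcontainer's config types, e.g.:
+//
+//     act, _ := ConvertStringToAction("SCMP_ACT_ERRNO")  // configs.Errno
+//     op, _ := ConvertStringToOperator("SCMP_CMP_EQ")    // configs.EqualTo
+//     arch, _ := ConvertStringToArch("SCMP_ARCH_X86_64") // "amd64"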
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/fixtures/proc_self_status b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/fixtures/proc_self_status
new file mode 100644
index 0000000..0e0084f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/fixtures/proc_self_status
@@ -0,0 +1,47 @@
+Name: cat
+State: R (running)
+Tgid: 19383
+Ngid: 0
+Pid: 19383
+PPid: 19275
+TracerPid: 0
+Uid: 1000 1000 1000 1000
+Gid: 1000 1000 1000 1000
+FDSize: 256
+Groups: 24 25 27 29 30 44 46 102 104 108 111 1000 1001
+NStgid: 19383
+NSpid: 19383
+NSpgid: 19383
+NSsid: 19275
+VmPeak: 5944 kB
+VmSize: 5944 kB
+VmLck: 0 kB
+VmPin: 0 kB
+VmHWM: 744 kB
+VmRSS: 744 kB
+VmData: 324 kB
+VmStk: 136 kB
+VmExe: 48 kB
+VmLib: 1776 kB
+VmPTE: 32 kB
+VmPMD: 12 kB
+VmSwap: 0 kB
+Threads: 1
+SigQ: 0/30067
+SigPnd: 0000000000000000
+ShdPnd: 0000000000000000
+SigBlk: 0000000000000000
+SigIgn: 0000000000000080
+SigCgt: 0000000000000000
+CapInh: 0000000000000000
+CapPrm: 0000000000000000
+CapEff: 0000000000000000
+CapBnd: 0000003fffffffff
+CapAmb: 0000000000000000
+Seccomp: 0
+Cpus_allowed: f
+Cpus_allowed_list: 0-3
+Mems_allowed: 00000000,00000001
+Mems_allowed_list: 0
+voluntary_ctxt_switches: 0
+nonvoluntary_ctxt_switches: 1
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
new file mode 100644
index 0000000..623e227
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
@@ -0,0 +1,231 @@
+// +build linux,cgo,seccomp
+
+package seccomp
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+ "syscall"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+)
+
+var (
+ actAllow = libseccomp.ActAllow
+ actTrap = libseccomp.ActTrap
+ actKill = libseccomp.ActKill
+ actTrace = libseccomp.ActTrace.SetReturnCode(int16(syscall.EPERM))
+ actErrno = libseccomp.ActErrno.SetReturnCode(int16(syscall.EPERM))
+
+ // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER.
+ SeccompModeFilter = uintptr(2)
+)
+
+// InitSeccomp filters the given syscalls in a container, preventing them from
+// being used. The filter is installed in the container init process and is
+// carried over to all child processes. Setns calls, however, require a
+// separate invocation, as they are not children of the init until they join
+// the namespace.
+func InitSeccomp(config *configs.Seccomp) error {
+ if config == nil {
+ return fmt.Errorf("cannot initialize Seccomp - nil config passed")
+ }
+
+ defaultAction, err := getAction(config.DefaultAction)
+ if err != nil {
+ return fmt.Errorf("error initializing seccomp - invalid default action")
+ }
+
+ filter, err := libseccomp.NewFilter(defaultAction)
+ if err != nil {
+ return fmt.Errorf("error creating filter: %s", err)
+ }
+
+ // Add extra architectures
+ for _, arch := range config.Architectures {
+ scmpArch, err := libseccomp.GetArchFromString(arch)
+ if err != nil {
+ return err
+ }
+
+ if err := filter.AddArch(scmpArch); err != nil {
+ return err
+ }
+ }
+
+ // Unset no new privs bit
+ if err := filter.SetNoNewPrivsBit(false); err != nil {
+ return fmt.Errorf("error setting no new privileges: %s", err)
+ }
+
+ // Add a rule for each syscall
+ for _, call := range config.Syscalls {
+ if call == nil {
+ return fmt.Errorf("encountered nil syscall while initializing Seccomp")
+ }
+
+ if err = matchCall(filter, call); err != nil {
+ return err
+ }
+ }
+
+ if err = filter.Load(); err != nil {
+ return fmt.Errorf("error loading seccomp filter into kernel: %s", err)
+ }
+
+ return nil
+}
+
+// IsEnabled returns whether the kernel has been configured to support seccomp.
+func IsEnabled() bool {
+ // Try to read from /proc/self/status for kernels > 3.8
+ s, err := parseStatusFile("/proc/self/status")
+ if err != nil {
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL {
+ return true
+ }
+ }
+ return false
+ }
+ _, ok := s["Seccomp"]
+ return ok
+}
+
+// Convert Libcontainer Action to Libseccomp ScmpAction
+func getAction(act configs.Action) (libseccomp.ScmpAction, error) {
+ switch act {
+ case configs.Kill:
+ return actKill, nil
+ case configs.Errno:
+ return actErrno, nil
+ case configs.Trap:
+ return actTrap, nil
+ case configs.Allow:
+ return actAllow, nil
+ case configs.Trace:
+ return actTrace, nil
+ default:
+ return libseccomp.ActInvalid, fmt.Errorf("invalid action, cannot use in rule")
+ }
+}
+
+// Convert Libcontainer Operator to Libseccomp ScmpCompareOp
+func getOperator(op configs.Operator) (libseccomp.ScmpCompareOp, error) {
+ switch op {
+ case configs.EqualTo:
+ return libseccomp.CompareEqual, nil
+ case configs.NotEqualTo:
+ return libseccomp.CompareNotEqual, nil
+ case configs.GreaterThan:
+ return libseccomp.CompareGreater, nil
+ case configs.GreaterThanOrEqualTo:
+ return libseccomp.CompareGreaterEqual, nil
+ case configs.LessThan:
+ return libseccomp.CompareLess, nil
+ case configs.LessThanOrEqualTo:
+ return libseccomp.CompareLessOrEqual, nil
+ case configs.MaskEqualTo:
+ return libseccomp.CompareMaskedEqual, nil
+ default:
+ return libseccomp.CompareInvalid, fmt.Errorf("invalid operator, cannot use in rule")
+ }
+}
+
+// Convert Libcontainer Arg to Libseccomp ScmpCondition
+func getCondition(arg *configs.Arg) (libseccomp.ScmpCondition, error) {
+ cond := libseccomp.ScmpCondition{}
+
+ if arg == nil {
+ return cond, fmt.Errorf("cannot convert nil to syscall condition")
+ }
+
+ op, err := getOperator(arg.Op)
+ if err != nil {
+ return cond, err
+ }
+
+ return libseccomp.MakeCondition(arg.Index, op, arg.Value, arg.ValueTwo)
+}
+
+// Add a rule to match a single syscall
+func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
+ if call == nil || filter == nil {
+ return fmt.Errorf("cannot use nil as syscall to block")
+ }
+
+ if len(call.Name) == 0 {
+ return fmt.Errorf("empty string is not a valid syscall")
+ }
+
+ // If we can't resolve the syscall, assume it's not supported on this kernel
+ // Ignore it, don't error out
+ callNum, err := libseccomp.GetSyscallFromName(call.Name)
+ if err != nil {
+ log.Printf("Error resolving syscall name %s: %s - ignoring syscall.", call.Name, err)
+ return nil
+ }
+
+ // Convert the call's action to the libseccomp equivalent
+ callAct, err := getAction(call.Action)
+ if err != nil {
+ return err
+ }
+
+ // Unconditional match - just add the rule
+ if len(call.Args) == 0 {
+ if err = filter.AddRule(callNum, callAct); err != nil {
+ return err
+ }
+ } else {
+ // Conditional match - convert the per-arg rules into library format
+ conditions := []libseccomp.ScmpCondition{}
+
+ for _, cond := range call.Args {
+ newCond, err := getCondition(cond)
+ if err != nil {
+ return err
+ }
+
+ conditions = append(conditions, newCond)
+ }
+
+ if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func parseStatusFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ status := make(map[string]string)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ parts := strings.Split(text, ":")
+
+ if len(parts) <= 1 {
+ continue
+ }
+
+ status[parts[0]] = parts[1]
+ }
+ return status, nil
+}
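+
+// Editor's sketch: parseStatusFile splits each status line on ':' and keeps
+// the first value, so the seccomp probe above amounts to:
+//
+//     s, err := parseStatusFile("/proc/self/status")
+//     if err == nil {
+//         _, ok := s["Seccomp"] // key present on kernels >= 3.8
+//     }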
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
new file mode 100644
index 0000000..888483e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
@@ -0,0 +1,24 @@
+// +build !linux !cgo !seccomp
+
+package seccomp
+
+import (
+ "errors"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var ErrSeccompNotEnabled = errors.New("seccomp: config provided but seccomp not supported")
+
+// Seccomp not supported, do nothing
+func InitSeccomp(config *configs.Seccomp) error {
+ if config != nil {
+ return ErrSeccompNotEnabled
+ }
+ return nil
+}
+
+// IsEnabled returns false, because it is not supported.
+func IsEnabled() bool {
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/selinux/selinux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/selinux/selinux.go
new file mode 100644
index 0000000..88d612c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/selinux/selinux.go
@@ -0,0 +1,477 @@
+// +build linux
+
+package selinux
+
+import (
+ "bufio"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+const (
+ Enforcing = 1
+ Permissive = 0
+ Disabled = -1
+ selinuxDir = "/etc/selinux/"
+ selinuxConfig = selinuxDir + "config"
+ selinuxTypeTag = "SELINUXTYPE"
+ selinuxTag = "SELINUX"
+ selinuxPath = "/sys/fs/selinux"
+ xattrNameSelinux = "security.selinux"
+ stRdOnly = 0x01
+)
+
+var (
+ assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
+ mcsList = make(map[string]bool)
+ selinuxfs = "unknown"
+ selinuxEnabled = false // Stores whether selinux is currently enabled
+ selinuxEnabledChecked = false // Stores whether selinux enablement has been checked or established yet
+)
+
+type SELinuxContext map[string]string
+
+// SetDisabled disables selinux support for the package
+func SetDisabled() {
+ selinuxEnabled, selinuxEnabledChecked = false, true
+}
+
+// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
+// filesystem or an empty string if no mountpoint is found. Selinuxfs is
+// a proc-like pseudo-filesystem that exposes the selinux policy API to
+// processes. The existence of an selinuxfs mount is used to determine
+// whether selinux is currently enabled or not.
+func getSelinuxMountPoint() string {
+ if selinuxfs != "unknown" {
+ return selinuxfs
+ }
+ selinuxfs = ""
+
+ mounts, err := mount.GetMounts()
+ if err != nil {
+ return selinuxfs
+ }
+ for _, mount := range mounts {
+ if mount.Fstype == "selinuxfs" {
+ selinuxfs = mount.Mountpoint
+ break
+ }
+ }
+ if selinuxfs != "" {
+ var buf syscall.Statfs_t
+ syscall.Statfs(selinuxfs, &buf)
+ if (buf.Flags & stRdOnly) == 1 {
+ selinuxfs = ""
+ }
+ }
+ return selinuxfs
+}
+
+// SelinuxEnabled returns whether selinux is currently enabled.
+func SelinuxEnabled() bool {
+ if selinuxEnabledChecked {
+ return selinuxEnabled
+ }
+ selinuxEnabledChecked = true
+ if fs := getSelinuxMountPoint(); fs != "" {
+ if con, _ := Getcon(); con != "kernel" {
+ selinuxEnabled = true
+ }
+ }
+ return selinuxEnabled
+}
+
+func readConfig(target string) (value string) {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ in, err := os.Open(selinuxConfig)
+ if err != nil {
+ return ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return ""
+ }
+ done = true
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == target {
+ return strings.Trim(val, "\"")
+ }
+ }
+ }
+ return ""
+}
+
+func getSELinuxPolicyRoot() string {
+ return selinuxDir + readConfig(selinuxTypeTag)
+}
+
+func readCon(name string) (string, error) {
+ var val string
+
+ in, err := os.Open(name)
+ if err != nil {
+ return "", err
+ }
+ defer in.Close()
+
+ _, err = fmt.Fscanf(in, "%s", &val)
+ return val, err
+}
+
+// Setfilecon sets the SELinux label for this path or returns an error.
+func Setfilecon(path string, scon string) error {
+ return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
+}
+
+// Getfilecon returns the SELinux label for this path or returns an error.
+func Getfilecon(path string) (string, error) {
+ con, err := system.Lgetxattr(path, xattrNameSelinux)
+
+ // Trim the NUL byte at the end of the byte buffer, if present.
+ if len(con) > 0 && con[len(con)-1] == '\x00' {
+ con = con[:len(con)-1]
+ }
+ return string(con), err
+}
+
+func Setfscreatecon(scon string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon)
+}
+
+func Getfscreatecon() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
+}
+
+// Getcon returns the SELinux label of the current process thread, or an error.
+func Getcon() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
+}
+
+// Getpidcon returns the SELinux label of the given pid, or an error.
+func Getpidcon(pid int) (string, error) {
+ return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
+}
+
+func Getexeccon() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
+}
+
+func writeCon(name string, val string) error {
+ out, err := os.OpenFile(name, os.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ if val != "" {
+ _, err = out.Write([]byte(val))
+ } else {
+ _, err = out.Write(nil)
+ }
+ return err
+}
+
+func Setexeccon(scon string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
+}
+
+func (c SELinuxContext) Get() string {
+ return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
+}
+
+func NewContext(scon string) SELinuxContext {
+ c := make(SELinuxContext)
+
+ if len(scon) != 0 {
+ con := strings.SplitN(scon, ":", 4)
+ c["user"] = con[0]
+ c["role"] = con[1]
+ c["type"] = con[2]
+ c["level"] = con[3]
+ }
+ return c
+}
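+
+// For example (editor's note), a typical four-field label splits as:
+//
+//     c := NewContext("system_u:system_r:svirt_lxc_net_t:s0:c57,c527")
+//     // c["user"]  == "system_u"
+//     // c["role"]  == "system_r"
+//     // c["type"]  == "svirt_lxc_net_t"
+//     // c["level"] == "s0:c57,c527"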
+
+func ReserveLabel(scon string) {
+ if len(scon) != 0 {
+ con := strings.SplitN(scon, ":", 4)
+ mcsAdd(con[3])
+ }
+}
+
+func selinuxEnforcePath() string {
+ return fmt.Sprintf("%s/enforce", selinuxPath)
+}
+
+func SelinuxGetEnforce() int {
+ var enforce int
+
+ enforceS, err := readCon(selinuxEnforcePath())
+ if err != nil {
+ return -1
+ }
+
+ enforce, err = strconv.Atoi(string(enforceS))
+ if err != nil {
+ return -1
+ }
+ return enforce
+}
+
+func SelinuxSetEnforce(mode int) error {
+ return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode))
+}
+
+func SelinuxGetEnforceMode() int {
+ switch readConfig(selinuxTag) {
+ case "enforcing":
+ return Enforcing
+ case "permissive":
+ return Permissive
+ }
+ return Disabled
+}
+
+func mcsAdd(mcs string) error {
+ if mcsList[mcs] {
+ return fmt.Errorf("MCS Label already exists")
+ }
+ mcsList[mcs] = true
+ return nil
+}
+
+func mcsDelete(mcs string) {
+ mcsList[mcs] = false
+}
+
+func IntToMcs(id int, catRange uint32) string {
+ var (
+ SETSIZE = int(catRange)
+ TIER = SETSIZE
+ ORD = id
+ )
+
+ if id < 1 || id > 523776 {
+ return ""
+ }
+
+ for ORD > TIER {
+ ORD = ORD - TIER
+ TIER -= 1
+ }
+ TIER = SETSIZE - TIER
+ ORD = ORD + TIER
+ return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
+}
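+
+// Worked example (editor's note): with catRange 1024 and id 1, the loop never
+// runs, TIER becomes 0 and ORD stays 1, so:
+//
+//     IntToMcs(1, 1024) // "s0:c0,c1"
+//     IntToMcs(0, 1024) // "" (id out of range)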
+
+func uniqMcs(catRange uint32) string {
+ var (
+ n uint32
+ c1, c2 uint32
+ mcs string
+ )
+
+ for {
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c1 = n % catRange
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c2 = n % catRange
+ if c1 == c2 {
+ continue
+ } else {
+ if c1 > c2 {
+ t := c1
+ c1 = c2
+ c2 = t
+ }
+ }
+ mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
+ if err := mcsAdd(mcs); err != nil {
+ continue
+ }
+ break
+ }
+ return mcs
+}
+
+func FreeLxcContexts(scon string) {
+ if len(scon) != 0 {
+ con := strings.SplitN(scon, ":", 4)
+ mcsDelete(con[3])
+ }
+}
+
+func GetLxcContexts() (processLabel string, fileLabel string) {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ if !SelinuxEnabled() {
+ return "", ""
+ }
+ lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
+ in, err := os.Open(lxcPath)
+ if err != nil {
+ return "", ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err == io.EOF {
+ done = true
+ } else {
+ goto exit
+ }
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == "process" {
+ processLabel = strings.Trim(val, "\"")
+ }
+ if key == "file" {
+ fileLabel = strings.Trim(val, "\"")
+ }
+ }
+ }
+
+ if processLabel == "" || fileLabel == "" {
+ return "", ""
+ }
+
+exit:
+ // mcs := IntToMcs(os.Getpid(), 1024)
+ mcs := uniqMcs(1024)
+ scon := NewContext(processLabel)
+ scon["level"] = mcs
+ processLabel = scon.Get()
+ scon = NewContext(fileLabel)
+ scon["level"] = mcs
+ fileLabel = scon.Get()
+ return processLabel, fileLabel
+}
+
+func SecurityCheckContext(val string) error {
+ return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
+}
+
+func CopyLevel(src, dest string) (string, error) {
+ if src == "" {
+ return "", nil
+ }
+ if err := SecurityCheckContext(src); err != nil {
+ return "", err
+ }
+ if err := SecurityCheckContext(dest); err != nil {
+ return "", err
+ }
+ scon := NewContext(src)
+ tcon := NewContext(dest)
+ mcsDelete(tcon["level"])
+ mcsAdd(scon["level"])
+ tcon["level"] = scon["level"]
+ return tcon.Get(), nil
+}
+
+// Prevent users from relabeling system files
+func badPrefix(fpath string) error {
+ var badprefixes = []string{"/usr"}
+
+ for _, prefix := range badprefixes {
+ if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
+ return fmt.Errorf("Relabeling content in %s is not allowed.", prefix)
+ }
+ }
+ return nil
+}
+
+// Chcon changes the fpath file object to the SELinux label scon.
+// If fpath is a directory and recurse is true, Chcon will walk the
+// directory tree, setting the label on each file.
+func Chcon(fpath string, scon string, recurse bool) error {
+ if scon == "" {
+ return nil
+ }
+ if err := badPrefix(fpath); err != nil {
+ return err
+ }
+ callback := func(p string, info os.FileInfo, err error) error {
+ return Setfilecon(p, scon)
+ }
+
+ if recurse {
+ return filepath.Walk(fpath, callback)
+ }
+
+ return Setfilecon(fpath, scon)
+}
+
+// DupSecOpt takes an SELinux process label and returns security options that
+// will set the SELinux Type and Level for future container processes.
+func DupSecOpt(src string) []string {
+ if src == "" {
+ return nil
+ }
+ con := NewContext(src)
+ if con["user"] == "" ||
+ con["role"] == "" ||
+ con["type"] == "" ||
+ con["level"] == "" {
+ return nil
+ }
+ return []string{"label:user:" + con["user"],
+ "label:role:" + con["role"],
+ "label:type:" + con["type"],
+ "label:level:" + con["level"]}
+}
+
+// DisableSecOpt returns a security opt that can be used to disable SELinux
+// labeling support for future container processes.
+func DisableSecOpt() []string {
+ return []string{"label:disable"}
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/setgroups_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
new file mode 100644
index 0000000..c7bdb60
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
@@ -0,0 +1,11 @@
+// +build linux,go1.5
+
+package libcontainer
+
+import "syscall"
+
+// Set the GidMappingsEnableSetgroups member to true, so the process's
+// setgroups proc entry won't be set to 'deny' if GidMappings are set.
+func enableSetgroups(sys *syscall.SysProcAttr) {
+ sys.GidMappingsEnableSetgroups = true
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/setns_init_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
new file mode 100644
index 0000000..2bde44f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
@@ -0,0 +1,44 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/apparmor"
+ "github.com/opencontainers/runc/libcontainer/label"
+ "github.com/opencontainers/runc/libcontainer/seccomp"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+// linuxSetnsInit performs the container's initialization for running a new process
+// inside an existing container.
+type linuxSetnsInit struct {
+ config *initConfig
+}
+
+func (l *linuxSetnsInit) Init() error {
+ if err := setupRlimits(l.config.Config); err != nil {
+ return err
+ }
+ if err := setOomScoreAdj(l.config.Config.OomScoreAdj); err != nil {
+ return err
+ }
+ if l.config.Config.Seccomp != nil {
+ if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
+ return err
+ }
+ }
+ if err := finalizeNamespace(l.config); err != nil {
+ return err
+ }
+ if err := apparmor.ApplyProfile(l.config.Config.AppArmorProfile); err != nil {
+ return err
+ }
+ if l.config.Config.ProcessLabel != "" {
+ if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil {
+ return err
+ }
+ }
+ return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ())
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go
new file mode 100644
index 0000000..5ee6e37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go
@@ -0,0 +1,27 @@
+package stacktrace
+
+import "runtime"
+
+// Capture captures a stacktrace for the calling go program.
+//
+// userSkip is the number of frames to skip.
+func Capture(userSkip int) Stacktrace {
+ var (
+ skip = userSkip + 1 // add one for our own function
+ frames []Frame
+ prevPc uintptr = 0
+ )
+ for i := skip; ; i++ {
+ pc, file, line, ok := runtime.Caller(i)
+ // detect if the caller is repeated to avoid a loop; gccgo
+ // currently runs into a loop without this check
+ if !ok || pc == prevPc {
+ break
+ }
+ frames = append(frames, NewFrame(pc, file, line))
+ prevPc = pc
+ }
+ return Stacktrace{
+ Frames: frames,
+ }
+}
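+
+// Example (editor's sketch):
+//
+//     st := Capture(0)
+//     for _, f := range st.Frames {
+//         fmt.Printf("%s.%s (%s:%d)\n", f.Package, f.Function, f.File, f.Line)
+//     }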
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go
new file mode 100644
index 0000000..0d590d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go
@@ -0,0 +1,38 @@
+package stacktrace
+
+import (
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// NewFrame returns a new stack frame for the provided information
+func NewFrame(pc uintptr, file string, line int) Frame {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return Frame{}
+ }
+ pack, name := parseFunctionName(fn.Name())
+ return Frame{
+ Line: line,
+ File: filepath.Base(file),
+ Package: pack,
+ Function: name,
+ }
+}
+
+func parseFunctionName(name string) (string, string) {
+ i := strings.LastIndex(name, ".")
+ if i == -1 {
+ return "", name
+ }
+ return name[:i], name[i+1:]
+}
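+
+// For example (editor's note):
+//
+//     parseFunctionName("runtime.main")     // ("runtime", "main")
+//     parseFunctionName("main.(*T).String") // ("main.(*T)", "String")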
+
+// Frame contains all the information for a stack frame within a go program
+type Frame struct {
+ File string
+ Function string
+ Package string
+ Line int
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go
new file mode 100644
index 0000000..5e8b58d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go
@@ -0,0 +1,5 @@
+package stacktrace
+
+type Stacktrace struct {
+ Frames []Frame
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/standard_init_linux.go
new file mode 100644
index 0000000..d3b5086
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/standard_init_linux.go
@@ -0,0 +1,119 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "io"
+ "os"
+ "syscall"
+
+ "github.com/opencontainers/runc/libcontainer/apparmor"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/label"
+ "github.com/opencontainers/runc/libcontainer/seccomp"
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type linuxStandardInit struct {
+ pipe io.ReadWriter
+ parentPid int
+ config *initConfig
+}
+
+func (l *linuxStandardInit) Init() error {
+ // join any namespaces via a path to the namespace fd if provided
+ if err := joinExistingNamespaces(l.config.Config.Namespaces); err != nil {
+ return err
+ }
+ var console *linuxConsole
+ if l.config.Console != "" {
+ console = newConsoleFromPath(l.config.Console)
+ if err := console.dupStdio(); err != nil {
+ return err
+ }
+ }
+ if _, err := syscall.Setsid(); err != nil {
+ return err
+ }
+ if console != nil {
+ if err := system.Setctty(); err != nil {
+ return err
+ }
+ }
+ if err := setupNetwork(l.config); err != nil {
+ return err
+ }
+ if err := setupRoute(l.config.Config); err != nil {
+ return err
+ }
+ if err := setupRlimits(l.config.Config); err != nil {
+ return err
+ }
+ if err := setOomScoreAdj(l.config.Config.OomScoreAdj); err != nil {
+ return err
+ }
+ label.Init()
+ // InitializeMountNamespace() can be executed only for a new mount namespace
+ if l.config.Config.Namespaces.Contains(configs.NEWNS) {
+ if err := setupRootfs(l.config.Config, console); err != nil {
+ return err
+ }
+ }
+ if hostname := l.config.Config.Hostname; hostname != "" {
+ if err := syscall.Sethostname([]byte(hostname)); err != nil {
+ return err
+ }
+ }
+ if err := apparmor.ApplyProfile(l.config.Config.AppArmorProfile); err != nil {
+ return err
+ }
+ if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil {
+ return err
+ }
+
+ for key, value := range l.config.Config.Sysctl {
+ if err := writeSystemProperty(key, value); err != nil {
+ return err
+ }
+ }
+ for _, path := range l.config.Config.ReadonlyPaths {
+ if err := remountReadonly(path); err != nil {
+ return err
+ }
+ }
+ for _, path := range l.config.Config.MaskPaths {
+ if err := maskFile(path); err != nil {
+ return err
+ }
+ }
+ pdeath, err := system.GetParentDeathSignal()
+ if err != nil {
+ return err
+ }
+ // Tell our parent that we're ready to Execv. This must be done before the
+ // Seccomp rules have been applied, because we need to be able to read and
+ // write to a socket.
+ if err := syncParentReady(l.pipe); err != nil {
+ return err
+ }
+ if l.config.Config.Seccomp != nil {
+ if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
+ return err
+ }
+ }
+ if err := finalizeNamespace(l.config); err != nil {
+ return err
+ }
+ // finalizeNamespace can change user/group which clears the parent death
+ // signal, so we restore it here.
+ if err := pdeath.Restore(); err != nil {
+ return err
+ }
+ // compare the parent from the initial start of the init process and make sure that it did not change.
+ // if the parent changed, it died and we were reparented to something else, so we should
+ // just kill ourselves and not cause problems for someone else.
+ if syscall.Getppid() != l.parentPid {
+ return syscall.Kill(syscall.Getpid(), syscall.SIGKILL)
+ }
+ return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ())
+}
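
The final ppid check above guards against the parent dying mid-init: a reparented init kills itself rather than exec under the wrong supervisor. A minimal standalone sketch of the same guard (an illustration, not runc's API):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    func main() {
        parentPid := os.Getppid() // captured early, like linuxStandardInit.parentPid

        // ... namespace, rootfs, and seccomp setup would happen here ...

        // If the parent died we were reparented (usually to pid 1), so kill
        // ourselves instead of exec'ing under the wrong supervisor.
        if syscall.Getppid() != parentPid {
            syscall.Kill(syscall.Getpid(), syscall.SIGKILL)
            return
        }
        fmt.Println("parent unchanged, safe to exec")
    }
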
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/state_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/state_linux.go
new file mode 100644
index 0000000..fb71ef9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/state_linux.go
@@ -0,0 +1,223 @@
+// +build linux
+
+package libcontainer
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+func newStateTransitionError(from, to containerState) error {
+ return &stateTransitionError{
+ From: from.status().String(),
+ To: to.status().String(),
+ }
+}
+
+// stateTransitionError is returned when an invalid state transition happens from one
+// state to another.
+type stateTransitionError struct {
+ From string
+ To string
+}
+
+func (s *stateTransitionError) Error() string {
+ return fmt.Sprintf("invalid state transition from %s to %s", s.From, s.To)
+}
+
+type containerState interface {
+ transition(containerState) error
+ destroy() error
+ status() Status
+}
+
+func destroy(c *linuxContainer) error {
+ if !c.config.Namespaces.Contains(configs.NEWPID) {
+ if err := killCgroupProcesses(c.cgroupManager); err != nil {
+ logrus.Warn(err)
+ }
+ }
+ err := c.cgroupManager.Destroy()
+ if rerr := os.RemoveAll(c.root); err == nil {
+ err = rerr
+ }
+ c.initProcess = nil
+ if herr := runPoststopHooks(c); err == nil {
+ err = herr
+ }
+ c.state = &stoppedState{c: c}
+ return err
+}
+
+func runPoststopHooks(c *linuxContainer) error {
+ if c.config.Hooks != nil {
+ s := configs.HookState{
+ Version: c.config.Version,
+ ID: c.id,
+ Root: c.config.Rootfs,
+ }
+ for _, hook := range c.config.Hooks.Poststop {
+ if err := hook.Run(s); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// stoppedState represents a container in a stopped/destroyed state.
+type stoppedState struct {
+ c *linuxContainer
+}
+
+func (b *stoppedState) status() Status {
+ return Destroyed
+}
+
+func (b *stoppedState) transition(s containerState) error {
+ switch s.(type) {
+ case *runningState:
+ b.c.state = s
+ return nil
+ case *restoredState:
+ b.c.state = s
+ return nil
+ case *stoppedState:
+ return nil
+ }
+ return newStateTransitionError(b, s)
+}
+
+func (b *stoppedState) destroy() error {
+ return destroy(b.c)
+}
+
+// runningState represents a container that is currently running.
+type runningState struct {
+ c *linuxContainer
+}
+
+func (r *runningState) status() Status {
+ return Running
+}
+
+func (r *runningState) transition(s containerState) error {
+ switch s.(type) {
+ case *stoppedState:
+ running, err := r.c.isRunning()
+ if err != nil {
+ return err
+ }
+ if running {
+ return newGenericError(fmt.Errorf("container still running"), ContainerNotStopped)
+ }
+ r.c.state = s
+ return nil
+ case *pausedState:
+ r.c.state = s
+ return nil
+ case *runningState:
+ return nil
+ }
+ return newStateTransitionError(r, s)
+}
+
+func (r *runningState) destroy() error {
+ running, err := r.c.isRunning()
+ if err != nil {
+ return err
+ }
+ if running {
+ return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped)
+ }
+ return destroy(r.c)
+}
+
+// pausedState represents a container that is currently paused. It cannot be destroyed in a
+// paused state and must transition back to running first.
+type pausedState struct {
+ c *linuxContainer
+}
+
+func (p *pausedState) status() Status {
+ return Paused
+}
+
+func (p *pausedState) transition(s containerState) error {
+ switch s.(type) {
+ case *runningState, *stoppedState:
+ p.c.state = s
+ return nil
+ case *pausedState:
+ return nil
+ }
+ return newStateTransitionError(p, s)
+}
+
+func (p *pausedState) destroy() error {
+ isRunning, err := p.c.isRunning()
+ if err != nil {
+ return err
+ }
+ if !isRunning {
+ if err := p.c.cgroupManager.Freeze(configs.Thawed); err != nil {
+ return err
+ }
+ return destroy(p.c)
+ }
+ return newGenericError(fmt.Errorf("container is paused"), ContainerPaused)
+}
+
+// restoredState is the same as the running state but also has associated checkpoint
+// information that may need to be destroyed when the container is stopped and destroy is called.
+type restoredState struct {
+ imageDir string
+ c *linuxContainer
+}
+
+func (r *restoredState) status() Status {
+ return Running
+}
+
+func (r *restoredState) transition(s containerState) error {
+ switch s.(type) {
+ case *stoppedState:
+ return nil
+ case *runningState:
+ return nil
+ }
+ return newStateTransitionError(r, s)
+}
+
+func (r *restoredState) destroy() error {
+ if _, err := os.Stat(filepath.Join(r.c.root, "checkpoint")); err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
+ return destroy(r.c)
+}
+
+// createdState is used whenever a container is restored, loaded, or is setting up additional
+// processes inside; it should not be destroyed when it exits.
+type createdState struct {
+ c *linuxContainer
+ s Status
+}
+
+func (n *createdState) status() Status {
+ return n.s
+}
+
+func (n *createdState) transition(s containerState) error {
+ n.c.state = s
+ return nil
+}
+
+func (n *createdState) destroy() error {
+ return nil
+}
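
The container state types above are unexported, so as a hedged illustration here is a toy reimplementation of the same transition-table pattern, showing how an invalid move is rejected:

    package main

    import "fmt"

    type state interface{ transition(to state) error }

    type stopped struct{}
    type running struct{}
    type restored struct{}
    type paused struct{}

    // stopped may move to running or restored (or stay stopped), mirroring
    // stoppedState.transition above; anything else is rejected.
    func (stopped) transition(to state) error {
        switch to.(type) {
        case running, restored, stopped:
            return nil
        }
        return fmt.Errorf("invalid state transition from stopped to %T", to)
    }

    func (running) transition(to state) error  { return nil } // elided
    func (restored) transition(to state) error { return nil } // elided
    func (paused) transition(to state) error   { return nil } // elided

    func main() {
        var s state = stopped{}
        fmt.Println(s.transition(running{})) // <nil>
        fmt.Println(s.transition(paused{}))  // invalid state transition from stopped to main.paused
    }
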
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats.go
new file mode 100644
index 0000000..303e4b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats.go
@@ -0,0 +1,15 @@
+package libcontainer
+
+type NetworkInterface struct {
+ // Name is the name of the network interface.
+ Name string
+
+ RxBytes uint64
+ RxPackets uint64
+ RxErrors uint64
+ RxDropped uint64
+ TxBytes uint64
+ TxPackets uint64
+ TxErrors uint64
+ TxDropped uint64
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_freebsd.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_freebsd.go
new file mode 100644
index 0000000..f8d1d68
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_freebsd.go
@@ -0,0 +1,5 @@
+package libcontainer
+
+type Stats struct {
+ Interfaces []*NetworkInterface
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_linux.go
new file mode 100644
index 0000000..c629dc6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_linux.go
@@ -0,0 +1,8 @@
+package libcontainer
+
+import "github.com/opencontainers/runc/libcontainer/cgroups"
+
+type Stats struct {
+ Interfaces []*NetworkInterface
+ CgroupStats *cgroups.Stats
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_windows.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_windows.go
new file mode 100644
index 0000000..f8d1d68
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/stats_windows.go
@@ -0,0 +1,5 @@
+package libcontainer
+
+type Stats struct {
+ Interfaces []*NetworkInterface
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/linux.go
new file mode 100644
index 0000000..6c835e6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/linux.go
@@ -0,0 +1,114 @@
+// +build linux
+
+package system
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+ "unsafe"
+)
+
+type ParentDeathSignal int
+
+func (p ParentDeathSignal) Restore() error {
+ if p == 0 {
+ return nil
+ }
+ current, err := GetParentDeathSignal()
+ if err != nil {
+ return err
+ }
+ if p == current {
+ return nil
+ }
+ return p.Set()
+}
+
+func (p ParentDeathSignal) Set() error {
+ return SetParentDeathSignal(uintptr(p))
+}
+
+func Execv(cmd string, args []string, env []string) error {
+ name, err := exec.LookPath(cmd)
+ if err != nil {
+ return err
+ }
+
+ return syscall.Exec(name, args, env)
+}
+
+func SetParentDeathSignal(sig uintptr) error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func GetParentDeathSignal() (ParentDeathSignal, error) {
+ var sig int
+ _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0)
+ if err != 0 {
+ return -1, err
+ }
+ return ParentDeathSignal(sig), nil
+}
+
+func SetKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func ClearKeepCaps() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 {
+ return err
+ }
+
+ return nil
+}
+
+func Setctty() error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+/*
+ * Detect whether we are currently running in a user namespace.
+ * Copied from github.com/lxc/lxd/shared/util.go
+ */
+func RunningInUserNS() bool {
+ file, err := os.Open("/proc/self/uid_map")
+ if err != nil {
+ /*
+ * This kernel-provided file only exists if user namespaces are
+ * supported
+ */
+ return false
+ }
+ defer file.Close()
+
+ buf := bufio.NewReader(file)
+ l, _, err := buf.ReadLine()
+ if err != nil {
+ return false
+ }
+
+ line := string(l)
+ var a, b, c int64
+ fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
+ /*
+ * We assume we are in the initial user namespace if we have a full
+ * range - 4294967295 uids starting at uid 0.
+ */
+ if a == 0 && b == 0 && c == 4294967295 {
+ return false
+ }
+ return true
+}
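
A short usage sketch for the parent-death-signal helpers above, assuming the vendored import path and a Linux host:

    package main

    import (
        "fmt"
        "syscall"

        "github.com/opencontainers/runc/libcontainer/system"
    )

    func main() {
        // Ask the kernel to SIGKILL this process if its parent dies.
        if err := system.SetParentDeathSignal(uintptr(syscall.SIGKILL)); err != nil {
            panic(err)
        }
        sig, err := system.GetParentDeathSignal()
        if err != nil {
            panic(err)
        }
        fmt.Println(sig == system.ParentDeathSignal(syscall.SIGKILL)) // true

        // Restore is a no-op when the current value already matches.
        if err := sig.Restore(); err != nil {
            panic(err)
        }
    }
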
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/proc.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/proc.go
new file mode 100644
index 0000000..37808a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/proc.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// GetProcessStartTime looks in /proc for the process start time so that we can
+// verify that this pid started after we did.
+func GetProcessStartTime(pid int) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
+ if err != nil {
+ return "", err
+ }
+
+ parts := strings.Split(string(data), " ")
+ // the starttime is located at pos 22
+ // from the man page
+ //
+ // starttime %llu (was %lu before Linux 2.6)
+ // (22) The time the process started after system boot. In kernels before Linux 2.6, this
+ // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks
+ // (divide by sysconf(_SC_CLK_TCK)).
+ return parts[22-1], nil // starts at 1
+}
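
Because pids are recycled, callers pair GetProcessStartTime with a stored pid: if the recorded start time no longer matches, the pid now belongs to a different process. A hedged sketch:

    package main

    import (
        "fmt"
        "os"

        "github.com/opencontainers/runc/libcontainer/system"
    )

    func main() {
        pid := os.Getpid()
        started, err := system.GetProcessStartTime(pid)
        if err != nil {
            panic(err)
        }
        // Persist `started` next to the pid. Later, a mismatch means the
        // pid has been recycled and belongs to some other process.
        again, err := system.GetProcessStartTime(pid)
        if err != nil {
            panic(err)
        }
        fmt.Println(again == started) // true while this process is alive
    }
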
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
new file mode 100644
index 0000000..615ff4c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
@@ -0,0 +1,40 @@
+package system
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
+//
+// We need different setns values for the different platforms and architectures.
+// We declare the map here because the setns syscall does not exist in the stdlib.
+var setNsMap = map[string]uintptr{
+ "linux/386": 346,
+ "linux/arm64": 268,
+ "linux/amd64": 308,
+ "linux/arm": 375,
+ "linux/ppc": 350,
+ "linux/ppc64": 350,
+ "linux/ppc64le": 350,
+ "linux/s390x": 339,
+}
+
+var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+
+func SysSetns() uint32 {
+ return uint32(sysSetns)
+}
+
+func Setns(fd uintptr, flags uintptr) error {
+ ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+ if !exists {
+ return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+ _, _, err := syscall.RawSyscall(ns, fd, flags, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
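
A hedged usage sketch for Setns, joining the network namespace of a hypothetical pid 1234 (requires privileges; setns binds only the calling thread, so the goroutine is pinned first):

    package main

    import (
        "fmt"
        "os"
        "runtime"
        "syscall"

        "github.com/opencontainers/runc/libcontainer/system"
    )

    func main() {
        // setns affects only the calling thread, so pin the goroutine first.
        runtime.LockOSThread()
        defer runtime.UnlockOSThread()

        // Hypothetical target: the network namespace of pid 1234.
        f, err := os.Open("/proc/1234/ns/net")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // CLONE_NEWNET restricts the join to a network namespace fd.
        if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil {
            panic(err)
        }
        fmt.Println("joined network namespace")
    }
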
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
new file mode 100644
index 0000000..c990065
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
@@ -0,0 +1,25 @@
+// +build linux,386
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
new file mode 100644
index 0000000..0816bf8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
@@ -0,0 +1,25 @@
+// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
new file mode 100644
index 0000000..3f780f3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
@@ -0,0 +1,25 @@
+// +build linux,arm
+
+package system
+
+import (
+ "syscall"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
new file mode 100644
index 0000000..b3a07cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
@@ -0,0 +1,12 @@
+// +build cgo,linux cgo,freebsd
+
+package system
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+func GetClockTicks() int {
+ return int(C.sysconf(C._SC_CLK_TCK))
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
new file mode 100644
index 0000000..d93b5d5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
@@ -0,0 +1,15 @@
+// +build !cgo windows
+
+package system
+
+func GetClockTicks() int {
+ // TODO figure out a better alternative for platforms where we're missing cgo
+ //
+ // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
+ //
+ // An example of its usage can be found here.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
+
+ return 100
+}
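
Per the stat(5) note earlier, the raw starttime field is in clock ticks; dividing by GetClockTicks yields seconds since boot. A sketch combining the two helpers:

    package main

    import (
        "fmt"
        "os"
        "strconv"

        "github.com/opencontainers/runc/libcontainer/system"
    )

    func main() {
        raw, err := system.GetProcessStartTime(os.Getpid())
        if err != nil {
            panic(err)
        }
        ticks, err := strconv.ParseUint(raw, 10, 64)
        if err != nil {
            panic(err)
        }
        // starttime is in clock ticks; divide by sysconf(_SC_CLK_TCK).
        secs := ticks / uint64(system.GetClockTicks())
        fmt.Printf("process started %d seconds after boot\n", secs)
    }
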
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
new file mode 100644
index 0000000..30f74df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
@@ -0,0 +1,99 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _zero uintptr
+
+// Llistxattr lists the extended attribute names of path into dest and
+// returns the list's size; pass a nil dest to query the required size.
+func Llistxattr(path string, dest []byte) (size int, err error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return -1, err
+ }
+ var newpathBytes unsafe.Pointer
+ if len(dest) > 0 {
+ newpathBytes = unsafe.Pointer(&dest[0])
+ } else {
+ newpathBytes = unsafe.Pointer(&_zero)
+ }
+
+ _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0)
+ size = int(_size)
+ if errno != 0 {
+ return -1, errno
+ }
+
+ return size, nil
+}
+
+// Lgetxattr returns the value of the extended attribute attr of path,
+// or an error if the attribute cannot be read.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ var sz int
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start with a 128 length byte array
+ sz = 128
+ dest := make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+
+ switch {
+ case errno == syscall.ENODATA:
+ return nil, errno
+ case errno == syscall.ENOTSUP:
+ return nil, errno
+ case errno == syscall.ERANGE:
+ // The 128-byte buffer may be too small. Query again with a nil
+ // (zero-length) buffer so the kernel reports the real size of the
+ // xattr value on disk, then retry with a buffer that large.
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
+ sz = int(_sz)
+ if sz < 0 {
+ return nil, errno
+ }
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno != 0 {
+ return nil, errno
+ }
+ case errno != 0:
+ return nil, errno
+ }
+ sz = int(_sz)
+ return dest[:sz], nil
+}
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
new file mode 100644
index 0000000..edbe200
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
@@ -0,0 +1,2 @@
+Tianon Gravi (@tianon)
+Aleksa Sarai (@cyphar)
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go
new file mode 100644
index 0000000..6f8a982
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup.go
@@ -0,0 +1,108 @@
+package user
+
+import (
+ "errors"
+ "fmt"
+ "syscall"
+)
+
+var (
+ // The current operating system does not provide the required data for user lookups.
+ ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
+)
+
+func lookupUser(filter func(u User) bool) (User, error) {
+ // Get operating system-specific passwd reader-closer.
+ passwd, err := GetPasswd()
+ if err != nil {
+ return User{}, err
+ }
+ defer passwd.Close()
+
+ // Get the users.
+ users, err := ParsePasswdFilter(passwd, filter)
+ if err != nil {
+ return User{}, err
+ }
+
+ // No user entries found.
+ if len(users) == 0 {
+ return User{}, fmt.Errorf("no matching entries in passwd file")
+ }
+
+ // Assume the first entry is the "correct" one.
+ return users[0], nil
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+ return LookupUid(syscall.Getuid())
+}
+
+// LookupUser looks up a user by their username in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUser returns an error.
+func LookupUser(username string) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Name == username
+ })
+}
+
+// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
+// be found (or there is no /etc/passwd file on the filesystem), then LookupId
+// returns an error.
+func LookupUid(uid int) (User, error) {
+ return lookupUser(func(u User) bool {
+ return u.Uid == uid
+ })
+}
+
+func lookupGroup(filter func(g Group) bool) (Group, error) {
+ // Get operating system-specific group reader-closer.
+ group, err := GetGroup()
+ if err != nil {
+ return Group{}, err
+ }
+ defer group.Close()
+
+ // Get the groups.
+ groups, err := ParseGroupFilter(group, filter)
+ if err != nil {
+ return Group{}, err
+ }
+
+ // No group entries found.
+ if len(groups) == 0 {
+ return Group{}, fmt.Errorf("no matching entries in group file")
+ }
+
+ // Assume the first entry is the "correct" one.
+ return groups[0], nil
+}
+
+// CurrentGroup looks up the current user's group by their primary group id's
+// entry in /etc/passwd. If the group cannot be found (or there is no
+// /etc/group file on the filesystem), then CurrentGroup returns an error.
+func CurrentGroup() (Group, error) {
+ return LookupGid(syscall.Getgid())
+}
+
+// LookupGroup looks up a group by its name in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGroup
+// returns an error.
+func LookupGroup(groupname string) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Name == groupname
+ })
+}
+
+// LookupGid looks up a group by its group id in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGid
+// returns an error.
+func LookupGid(gid int) (Group, error) {
+ return lookupGroup(func(g Group) bool {
+ return g.Gid == gid
+ })
+}
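
A brief usage sketch for the lookup helpers above, assuming an /etc/passwd is present on the host:

    package main

    import (
        "fmt"

        "github.com/opencontainers/runc/libcontainer/user"
    )

    func main() {
        // Resolve the current uid against /etc/passwd.
        u, err := user.CurrentUser()
        if err != nil {
            panic(err)
        }
        fmt.Printf("uid=%d name=%s home=%s\n", u.Uid, u.Name, u.Home)

        // Lookups by name or id share the same filter mechanism.
        root, err := user.LookupUid(0)
        if err != nil {
            panic(err)
        }
        fmt.Println(root.Name) // typically "root"
    }
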
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
new file mode 100644
index 0000000..758b734
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -0,0 +1,30 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package user
+
+import (
+ "io"
+ "os"
+)
+
+// Unix-specific path to the passwd and group formatted files.
+const (
+ unixPasswdPath = "/etc/passwd"
+ unixGroupPath = "/etc/group"
+)
+
+func GetPasswdPath() (string, error) {
+ return unixPasswdPath, nil
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return os.Open(unixPasswdPath)
+}
+
+func GetGroupPath() (string, error) {
+ return unixGroupPath, nil
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return os.Open(unixGroupPath)
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
new file mode 100644
index 0000000..7217948
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
@@ -0,0 +1,21 @@
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package user
+
+import "io"
+
+func GetPasswdPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
+
+func GetGroupPath() (string, error) {
+ return "", ErrUnsupported
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return nil, ErrUnsupported
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go
new file mode 100644
index 0000000..e6375ea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/user/user.go
@@ -0,0 +1,418 @@
+package user
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ minId = 0
+ maxId = 1<<31 - 1 //for 32-bit systems compatibility
+)
+
+var (
+ ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId)
+)
+
+type User struct {
+ Name string
+ Pass string
+ Uid int
+ Gid int
+ Gecos string
+ Home string
+ Shell string
+}
+
+type Group struct {
+ Name string
+ Pass string
+ Gid int
+ List []string
+}
+
+func parseLine(line string, v ...interface{}) {
+ if line == "" {
+ return
+ }
+
+ parts := strings.Split(line, ":")
+ for i, p := range parts {
+ if len(v) <= i {
+ // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files
+ break
+ }
+
+ switch e := v[i].(type) {
+ case *string:
+ // "root", "adm", "/bin/bash"
+ *e = p
+ case *int:
+ // "0", "4", "1000"
+ // ignore string to int conversion errors, for great "tolerance" of naughty configuration files
+ *e, _ = strconv.Atoi(p)
+ case *[]string:
+ // "", "root", "root,adm,daemon"
+ if p != "" {
+ *e = strings.Split(p, ",")
+ } else {
+ *e = []string{}
+ }
+ default:
+ // panic, because this is a programming/logic error, not a runtime one
+ panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!")
+ }
+ }
+}
+
+func ParsePasswdFile(path string) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswd(passwd)
+}
+
+func ParsePasswd(passwd io.Reader) ([]User, error) {
+ return ParsePasswdFilter(passwd, nil)
+}
+
+func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswdFilter(passwd, filter)
+}
+
+func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for passwd-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []User{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 passwd
+ // name:password:UID:GID:GECOS:directory:shell
+ // Name:Pass:Uid:Gid:Gecos:Home:Shell
+ // root:x:0:0:root:/root:/bin/bash
+ // adm:x:3:4:adm:/var/adm:/bin/false
+ p := User{}
+ parseLine(
+ text,
+ &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell,
+ )
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
+
+func ParseGroupFile(path string) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroup(group)
+}
+
+func ParseGroup(group io.Reader) ([]Group, error) {
+ return ParseGroupFilter(group, nil)
+}
+
+func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroupFilter(group, filter)
+}
+
+func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for group-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []Group{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 group
+ // group_name:password:GID:user_list
+ // Name:Pass:Gid:List
+ // root:x:0:root
+ // adm:x:4:root,adm,daemon
+ p := Group{}
+ parseLine(
+ text,
+ &p.Name, &p.Pass, &p.Gid, &p.List,
+ )
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+
+ return out, nil
+}
+
+type ExecUser struct {
+ Uid, Gid int
+ Sgids []int
+ Home string
+}
+
+// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
+// given file paths and uses that data as the arguments to GetExecUser. If the
+// files cannot be opened for any reason, the error is ignored and a nil
+// io.Reader is passed instead.
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
+ passwd, err := os.Open(passwdPath)
+ if err != nil {
+ passwd = nil
+ } else {
+ defer passwd.Close()
+ }
+
+ group, err := os.Open(groupPath)
+ if err != nil {
+ group = nil
+ } else {
+ defer group.Close()
+ }
+
+ return GetExecUser(userSpec, defaults, passwd, group)
+}
+
+// GetExecUser parses a user specification string (using the passwd and group
+// readers as sources for /etc/passwd and /etc/group data, respectively). In
+// the case of blank fields or missing data from the sources, the values in
+// defaults are used.
+//
+// GetExecUser will return an error if a user or group literal could not be
+// found in any entry in passwd and group respectively.
+//
+// Examples of valid user specifications are:
+// * ""
+// * "user"
+// * "uid"
+// * "user:group"
+// * "uid:gid
+// * "user:gid"
+// * "uid:group"
+func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
+ var (
+ userArg, groupArg string
+ name string
+ )
+
+ if defaults == nil {
+ defaults = new(ExecUser)
+ }
+
+ // Copy over defaults.
+ user := &ExecUser{
+ Uid: defaults.Uid,
+ Gid: defaults.Gid,
+ Sgids: defaults.Sgids,
+ Home: defaults.Home,
+ }
+
+ // Sgids slice *cannot* be nil.
+ if user.Sgids == nil {
+ user.Sgids = []int{}
+ }
+
+ // allow for userArg to have either "user" syntax, or optionally "user:group" syntax
+ parseLine(userSpec, &userArg, &groupArg)
+
+ users, err := ParsePasswdFilter(passwd, func(u User) bool {
+ if userArg == "" {
+ return u.Uid == user.Uid
+ }
+ return u.Name == userArg || strconv.Itoa(u.Uid) == userArg
+ })
+ if err != nil && passwd != nil {
+ if userArg == "" {
+ userArg = strconv.Itoa(user.Uid)
+ }
+ return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err)
+ }
+
+ haveUser := users != nil && len(users) > 0
+ if haveUser {
+ // if we found any user entries that matched our filter, let's take the first one as "correct"
+ name = users[0].Name
+ user.Uid = users[0].Uid
+ user.Gid = users[0].Gid
+ user.Home = users[0].Home
+ } else if userArg != "" {
+ // we asked for a user but didn't find them... let's check to see if we wanted a numeric user
+ user.Uid, err = strconv.Atoi(userArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return nil, fmt.Errorf("Unable to find user %v", userArg)
+ }
+
+ // Must be inside valid uid range.
+ if user.Uid < minId || user.Uid > maxId {
+ return nil, ErrRange
+ }
+
+ // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
+ }
+
+ if groupArg != "" || name != "" {
+ groups, err := ParseGroupFilter(group, func(g Group) bool {
+ // Explicit group format takes precedence.
+ if groupArg != "" {
+ return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg
+ }
+
+ // Check if user is a member.
+ for _, u := range g.List {
+ if u == name {
+ return true
+ }
+ }
+
+ return false
+ })
+ if err != nil && group != nil {
+ return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
+ }
+
+ haveGroup := groups != nil && len(groups) > 0
+ if groupArg != "" {
+ if haveGroup {
+ // if we found any group entries that matched our filter, let's take the first one as "correct"
+ user.Gid = groups[0].Gid
+ } else {
+ // we asked for a group but didn't find it... let's check to see if we wanted a numeric group
+ user.Gid, err = strconv.Atoi(groupArg)
+ if err != nil {
+ // not numeric - we have to bail
+ return nil, fmt.Errorf("Unable to find group %v", groupArg)
+ }
+
+ // Ensure gid is inside gid range.
+ if user.Gid < minId || user.Gid > maxId {
+ return nil, ErrRange
+ }
+
+ // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
+ }
+ } else if haveGroup {
+ // If implicit group format, fill supplementary gids.
+ user.Sgids = make([]int, len(groups))
+ for i, group := range groups {
+ user.Sgids[i] = group.Gid
+ }
+ }
+ }
+
+ return user, nil
+}
+
+// GetAdditionalGroups looks up a list of groups by name or group id
+// against the given /etc/group formatted data. If a group name cannot
+// be found, an error will be returned. If a group id cannot be found,
+// or the given group data is nil, the id will be returned as-is
+// provided it is in the legal range.
+func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
+ var groups = []Group{}
+ if group != nil {
+ var err error
+ groups, err = ParseGroupFilter(group, func(g Group) bool {
+ for _, ag := range additionalGroups {
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+ }
+ }
+
+ gidMap := make(map[int]struct{})
+ for _, ag := range additionalGroups {
+ var found bool
+ for _, g := range groups {
+ // if we found a matched group either by name or gid, take the
+ // first matched as correct
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ if _, ok := gidMap[g.Gid]; !ok {
+ gidMap[g.Gid] = struct{}{}
+ found = true
+ break
+ }
+ }
+ }
+ // we asked for a group but didn't find it. let's check to see
+ // if we wanted a numeric group
+ if !found {
+ gid, err := strconv.Atoi(ag)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find group %s", ag)
+ }
+ // Ensure gid is inside gid range.
+ if gid < minId || gid > maxId {
+ return nil, ErrRange
+ }
+ gidMap[gid] = struct{}{}
+ }
+ }
+ gids := []int{}
+ for gid := range gidMap {
+ gids = append(gids, gid)
+ }
+ return gids, nil
+}
+
+// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
+// that opens the groupPath given and gives it as an argument to
+// GetAdditionalGroups.
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
+ group, err := os.Open(groupPath)
+ if err == nil {
+ defer group.Close()
+ }
+ return GetAdditionalGroups(additionalGroups, group)
+}
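
A hedged sketch of GetExecUser with in-memory passwd/group fixtures (the entries below are invented), exercising the "user:group" spec form documented above:

    package main

    import (
        "fmt"
        "strings"

        "github.com/opencontainers/runc/libcontainer/user"
    )

    func main() {
        // Minimal passwd- and group-formatted fixtures, invented for the demo.
        passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\nwww:x:33:33::/var/www:/bin/false\n")
        group := strings.NewReader("root:x:0:root\nwww:x:33:www\n")

        defaults := &user.ExecUser{Uid: 0, Gid: 0, Home: "/"}

        // "user:group" syntax; numeric forms like "33:33" resolve the same way.
        eu, err := user.GetExecUser("www:www", defaults, passwd, group)
        if err != nil {
            panic(err)
        }
        fmt.Printf("uid=%d gid=%d home=%s\n", eu.Uid, eu.Gid, eu.Home) // uid=33 gid=33 home=/var/www
    }
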
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/utils/utils.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/utils/utils.go
new file mode 100644
index 0000000..1378006
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -0,0 +1,56 @@
+package utils
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "encoding/json"
+ "io"
+ "path/filepath"
+ "syscall"
+)
+
+const (
+ exitSignalOffset = 128
+)
+
+// GenerateRandomName returns a new name joined with a prefix. The specified
+// size is used to truncate the randomly generated hex suffix.
+func GenerateRandomName(prefix string, size int) (string, error) {
+ id := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, id); err != nil {
+ return "", err
+ }
+ if size > 64 {
+ size = 64
+ }
+ return prefix + hex.EncodeToString(id)[:size], nil
+}
+
+// ResolveRootfs ensures that the rootfs path contains no symlinks and
+// returns the absolute, resolved path to the rootfs.
+func ResolveRootfs(uncleanRootfs string) (string, error) {
+ rootfs, err := filepath.Abs(uncleanRootfs)
+ if err != nil {
+ return "", err
+ }
+ return filepath.EvalSymlinks(rootfs)
+}
+
+// ExitStatus returns the correct exit status for a process based on whether
+// it was signaled or exited cleanly.
+func ExitStatus(status syscall.WaitStatus) int {
+ if status.Signaled() {
+ return exitSignalOffset + int(status.Signal())
+ }
+ return status.ExitStatus()
+}
+
+// WriteJSON writes the provided struct v to w using standard json marshaling
+func WriteJSON(w io.Writer, v interface{}) error {
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+}
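
A short usage sketch for GenerateRandomName and WriteJSON, with an invented "ctr-" prefix:

    package main

    import (
        "fmt"
        "os"

        "github.com/opencontainers/runc/libcontainer/utils"
    )

    func main() {
        // 64 hex chars are generated; size truncates the random suffix.
        name, err := utils.GenerateRandomName("ctr-", 12)
        if err != nil {
            panic(err)
        }
        fmt.Println(name) // e.g. ctr-3fa85f64a1b2

        // WriteJSON marshals v and writes it to any io.Writer.
        if err := utils.WriteJSON(os.Stdout, map[string]string{"name": name}); err != nil {
            panic(err)
        }
    }
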
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
new file mode 100644
index 0000000..408918f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -0,0 +1,33 @@
+// +build !windows
+
+package utils
+
+import (
+ "io/ioutil"
+ "strconv"
+ "syscall"
+)
+
+func CloseExecFrom(minFd int) error {
+ fdList, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return err
+ }
+ for _, fi := range fdList {
+ fd, err := strconv.Atoi(fi.Name())
+ if err != nil {
+ // ignore non-numeric file names
+ continue
+ }
+
+ if fd < minFd {
+ // ignore descriptors lower than our specified minimum
+ continue
+ }
+
+ // intentionally ignore errors from syscall.CloseOnExec
+ syscall.CloseOnExec(fd)
+ // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall)
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/errors.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/errors.go
new file mode 100644
index 0000000..8cd7741
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/errors.go
@@ -0,0 +1,8 @@
+package xattr
+
+import (
+ "fmt"
+ "runtime"
+)
+
+var ErrNotSupportedPlatform = fmt.Errorf("platform and architecture is not supported %s %s", runtime.GOOS, runtime.GOARCH)
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/xattr_linux.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/xattr_linux.go
new file mode 100644
index 0000000..933a752
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/xattr_linux.go
@@ -0,0 +1,53 @@
+// +build linux
+
+package xattr
+
+import (
+ "syscall"
+
+ "github.com/opencontainers/runc/libcontainer/system"
+)
+
+func XattrEnabled(path string) bool {
+ if Setxattr(path, "user.test", "") == syscall.ENOTSUP {
+ return false
+ }
+ return true
+}
+
+func stringsfromByte(buf []byte) (result []string) {
+ offset := 0
+ for index, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
+
+func Listxattr(path string) ([]string, error) {
+ size, err := system.Llistxattr(path, nil)
+ if err != nil {
+ return nil, err
+ }
+ buf := make([]byte, size)
+ read, err := system.Llistxattr(path, buf)
+ if err != nil {
+ return nil, err
+ }
+ names := stringsfromByte(buf[:read])
+ return names, nil
+}
+
+func Getxattr(path, attr string) (string, error) {
+ value, err := system.Lgetxattr(path, attr)
+ if err != nil {
+ return "", err
+ }
+ return string(value), nil
+}
+
+func Setxattr(path, xattr, value string) error {
+ return system.Lsetxattr(path, xattr, []byte(value), 0)
+}
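
A hedged round-trip sketch for the xattr helpers above; it assumes /tmp sits on a filesystem with user xattrs enabled (e.g. ext4):

    package main

    import (
        "fmt"
        "os"

        "github.com/opencontainers/runc/libcontainer/xattr"
    )

    func main() {
        // Scratch file; /tmp is assumed to support user xattrs.
        const path = "/tmp/xattr-demo"
        f, err := os.Create(path)
        if err != nil {
            panic(err)
        }
        f.Close()

        if err := xattr.Setxattr(path, "user.comment", "hello"); err != nil {
            panic(err)
        }
        v, err := xattr.Getxattr(path, "user.comment")
        if err != nil {
            panic(err)
        }
        fmt.Println(v) // hello

        names, err := xattr.Listxattr(path)
        if err != nil {
            panic(err)
        }
        fmt.Println(names) // includes "user.comment"
    }
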
diff --git a/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/xattr_unsupported.go b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/xattr_unsupported.go
new file mode 100644
index 0000000..821dea3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/xattr/xattr_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package xattr
+
+func Listxattr(path string) ([]string, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+func Getxattr(path, attr string) (string, error) {
+ return "", ErrNotSupportedPlatform
+}
+
+func Setxattr(path, xattr, value string) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS b/Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b382a04
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/LICENSE b/Godeps/_workspace/src/github.com/pborman/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/dce.go b/Godeps/_workspace/src/github.com/pborman/uuid/dce.go
new file mode 100644
index 0000000..50a0f2d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+ uuid := NewUUID()
+ if uuid != nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCEPerson(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCEGroup(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID or false.
+func (uuid UUID) Domain() (Domain, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID or false.
+func (uuid UUID) Id() (uint32, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
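
A brief usage sketch for the DCE Security constructors and accessors above:

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        u := uuid.NewDCEPerson() // embeds os.Getuid() in the Person domain
        if d, ok := u.Domain(); ok {
            fmt.Println(d) // Person
        }
        if id, ok := u.Id(); ok {
            fmt.Println(id) // the calling user's uid
        }
    }
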
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/doc.go b/Godeps/_workspace/src/github.com/pborman/uuid/doc.go
new file mode 100644
index 0000000..d8bd013
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/hash.go b/Godeps/_workspace/src/github.com/pborman/uuid/hash.go
new file mode 100644
index 0000000..cdd4192
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+ NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NIL = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space)
+ h.Write([]byte(data))
+ s := h.Sum(nil)
+ uuid := make([]byte, 16)
+ copy(uuid, s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
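
Name-based UUIDs are deterministic: the same namespace and name always hash to the same UUID. A short sketch (Version is defined elsewhere in this package):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        a := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.com"))
        b := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("example.com"))
        fmt.Println(a.String() == b.String()) // true: same namespace and name

        if v, ok := a.Version(); ok {
            fmt.Println(int(v)) // 5
        }
    }
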
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/json.go b/Godeps/_workspace/src/github.com/pborman/uuid/json.go
new file mode 100644
index 0000000..760580a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/json.go
@@ -0,0 +1,30 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "errors"
+
+func (u UUID) MarshalJSON() ([]byte, error) {
+ if len(u) == 0 {
+ return []byte(`""`), nil
+ }
+ return []byte(`"` + u.String() + `"`), nil
+}
+
+func (u *UUID) UnmarshalJSON(data []byte) error {
+ if len(data) == 0 || string(data) == `""` {
+ return nil
+ }
+ if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
+ return errors.New("invalid UUID format")
+ }
+ data = data[1 : len(data)-1]
+ uu := Parse(string(data))
+ if uu == nil {
+ return errors.New("invalid UUID format")
+ }
+ *u = uu
+ return nil
+}
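
A round-trip sketch for the JSON marshaling above, embedding a UUID in a struct (NewRandom comes from elsewhere in this package):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/pborman/uuid"
    )

    type record struct {
        ID uuid.UUID `json:"id"`
    }

    func main() {
        r := record{ID: uuid.NewRandom()}
        data, err := json.Marshal(r)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // {"id":"xxxxxxxx-xxxx-4xxx-..."}

        var back record
        if err := json.Unmarshal(data, &back); err != nil {
            panic(err)
        }
        fmt.Println(back.ID.String() == r.ID.String()) // true
    }
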
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/node.go b/Godeps/_workspace/src/github.com/pborman/uuid/node.go
new file mode 100644
index 0000000..dd0a8ac
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/node.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "net"
+
+var (
+ interfaces []net.Interface // cached list of interfaces
+ ifname string // name of interface being used
+ nodeID []byte // hardware for version 1 UUIDs
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil && name != "" {
+ return false
+ }
+ }
+
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ if setNodeID(ifs.HardwareAddr) {
+ ifname = ifs.Name
+ return true
+ }
+ }
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ randomBits(nodeID)
+ return true
+ }
+ return false
+}
+
+// NodeID returns a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+ nid := make([]byte, 6)
+ copy(nid, nodeID)
+ return nid
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if setNodeID(id) {
+ ifname = "user"
+ return true
+ }
+ return false
+}
+
+func setNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ copy(nodeID, id)
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ if len(uuid) != 16 {
+ return nil
+ }
+ node := make([]byte, 6)
+ copy(node, uuid[10:])
+ return node
+}
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/sql.go b/Godeps/_workspace/src/github.com/pborman/uuid/sql.go
new file mode 100644
index 0000000..2d7679e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/sql.go
@@ -0,0 +1,40 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src.(type) {
+ case string:
+ // see uuid.Parse for required string format
+ parsed := Parse(src.(string))
+
+ if parsed == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = parsed
+ case []byte:
+ // assumes a simple slice of bytes, just check validity and store
+ u := UUID(src.([]byte))
+
+ if u.Variant() == Invalid {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = u
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/time.go b/Godeps/_workspace/src/github.com/pborman/uuid/time.go
new file mode 100644
index 0000000..7ebc9be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ mu sync.Mutex
+ lasttime uint64 // last time we returned
+ clock_seq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t into the number of seconds and nanoseconds since the
+// Unix epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer mu.Unlock()
+ mu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer mu.Unlock()
+ mu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer mu.Unlock()
+ mu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ old_seq := clock_seq
+ clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if old_seq != clock_seq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
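
A short sketch of this time API: GetTime hands back ticks on the Gregorian epoch plus the clock sequence it would embed in a Version 1 UUID, and Time.UnixTime converts those ticks back to the Unix epoch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pborman/uuid"
)

func main() {
	t, seq, err := uuid.GetTime() // 100ns ticks since 15 Oct 1582
	if err != nil {
		fmt.Println("clock error:", err)
		return
	}
	sec, nsec := t.UnixTime()
	fmt.Printf("ticks=%d clock_seq=%d time=%s\n",
		int64(t), seq, time.Unix(sec, nsec).UTC())
}
```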
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/util.go b/Godeps/_workspace/src/github.com/pborman/uuid/util.go
new file mode 100644
index 0000000..de40b10
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues maps each byte to its value as a hexadecimal digit, or to 255 if it
+// is not a hex digit.
+var xvalues = []byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+ b1 := xvalues[x[0]]
+ b2 := xvalues[x[1]]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
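
The 256-entry table trades a little memory for a branch-free lookup per character. A standalone sketch of the equivalent logic (hexVal is an illustrative stand-in for the unexported table):

```go
package main

import "fmt"

// hexVal mirrors what the xvalues table encodes: the hex value of an
// ASCII byte, or 255 when the byte is not a hexadecimal digit.
func hexVal(c byte) byte {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10
	}
	return 255
}

func main() {
	b1, b2 := hexVal('d'), hexVal('E')
	ok := b1 != 255 && b2 != 255
	fmt.Printf("byte=0x%02x ok=%v\n", b1<<4|b2, ok) // byte=0xde ok=true
}
```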
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/uuid.go b/Godeps/_workspace/src/github.com/pborman/uuid/uuid.go
new file mode 100644
index 0000000..2920fae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,163 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universally Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+ return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+ if len(s) == 36+9 {
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return nil
+ }
+ s = s[9:]
+ } else if len(s) != 36 {
+ return nil
+ }
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return nil
+ }
+ uuid := make([]byte, 16)
+ for i, x := range []int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ if v, ok := xtob(s[x:]); !ok {
+ return nil
+ } else {
+ uuid[i] = v
+ }
+ }
+ return uuid
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+ return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+ if len(uuid) != 16 {
+ return Invalid
+ }
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
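
A round-trip sketch of the parsing and formatting entry points, using the well-known DNS namespace UUID from RFC 4122:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if u == nil {
		fmt.Println("not a valid UUID")
		return
	}
	fmt.Println(u.String()) // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
	fmt.Println(u.URN())    // urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8
	if v, ok := u.Version(); ok {
		fmt.Println(v, u.Variant()) // VERSION_1 RFC4122
	}
}
```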
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/version1.go b/Godeps/_workspace/src/github.com/pborman/uuid/version1.go
new file mode 100644
index 0000000..0127eac
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+
+ now, seq, err := GetTime()
+ if err != nil {
+ return nil
+ }
+
+ uuid := make([]byte, 16)
+
+ time_low := uint32(now & 0xffffffff)
+ time_mid := uint16((now >> 32) & 0xffff)
+ time_hi := uint16((now >> 48) & 0x0fff)
+ time_hi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], time_low)
+ binary.BigEndian.PutUint16(uuid[4:], time_mid)
+ binary.BigEndian.PutUint16(uuid[6:], time_hi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID)
+
+ return uuid
+}
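
A sketch showing that the fields packed above can be read back with the accessors from time.go:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewUUID() // nil if the node ID or current time is unavailable
	if u == nil {
		fmt.Println("could not generate a version 1 UUID")
		return
	}
	t, _ := u.Time()            // timestamp packed into bytes 0-7
	seq, _ := u.ClockSequence() // clock sequence packed into bytes 8-9
	sec, nsec := t.UnixTime()
	fmt.Println(u, "created at", time.Unix(sec, nsec).UTC(), "seq", seq)
}
```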
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/version4.go b/Godeps/_workspace/src/github.com/pborman/uuid/version4.go
new file mode 100644
index 0000000..b3d4a36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10^-11),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+ uuid := make([]byte, 16)
+ randomBits([]byte(uuid))
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid
+}
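
A quick sketch verifying the fixed bits NewRandom sets:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.NewRandom()
	fmt.Println(u)
	// Byte 6 carries the version in its high nibble; byte 8 carries the
	// variant in its two high bits.
	fmt.Printf("version=%d variant bits=%02b\n", u[6]>>4, u[8]>>6) // version=4 variant bits=10
}
```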
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/LICENSE b/Godeps/_workspace/src/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/NOTICE b/Godeps/_workspace/src/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..37e4a7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,28 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+goautoneg
+http://bitbucket.org/ww/goautoneg
+Copyright 2011, Open Knowledge Foundation Ltd.
+See README.txt for license details.
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 0000000..3460f03
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..81032be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1,53 @@
+# Overview
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://www.prometheus.io) telemetric instrumentation. It
+enables authors to define process-space metrics for their servers and
+expose them through a web service interface for extraction,
+aggregation, and a whole slew of other post-processing techniques.
+
+# Installing
+ $ go get github.com/prometheus/client_golang/prometheus
+
+# Example
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ indexed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "my_company",
+ Subsystem: "indexer",
+ Name: "documents_indexed",
+ Help: "The number of documents indexed.",
+ })
+ size = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "my_company",
+ Subsystem: "storage",
+ Name: "documents_total_size_bytes",
+ Help: "The total size of all documents in the storage.",
+ })
+)
+
+func main() {
+ http.Handle("/metrics", prometheus.Handler())
+
+ indexed.Inc()
+ size.Set(5)
+
+ http.ListenAndServe(":8080", nil)
+}
+
+func init() {
+ prometheus.MustRegister(indexed)
+ prometheus.MustRegister(size)
+}
+```
+
+# Documentation
+
+[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..c046880
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+//
+// The stock metrics provided by this package (like Gauge, Counter, Summary) are
+// also Collectors (which only ever collect one metric, namely itself). An
+// implementer of Collector may, however, collect multiple metrics in a
+// coordinated fashion and/or create metrics on the fly. Examples of collectors
+// already implemented in this library are the metric vectors (i.e. collection
+// of multiple instances of the same Metric but with different label values)
+// like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by Prometheus when collecting metrics. The
+ // implementation sends each collected metric via the provided channel
+ // and returns once the last metric has been sent. The descriptor of
+ // each sent metric is one of those returned by Describe. Returned
+ // metrics that share the same descriptor must differ in their variable
+ // label values. This method may be called concurrently and must
+ // therefore be implemented in a concurrency safe way. Blocking occurs
+ // at the expense of total performance of rendering all registered
+ // metrics. Ideally, Collector implementations support concurrent
+ // readers.
+ Collect(chan<- Metric)
+}
+
+// SelfCollector implements Collector for a single Metric so that the
+// Metric collects itself. Add it as an anonymous field to a struct that
+// implements Metric, and call Init with the Metric itself as an argument.
+type SelfCollector struct {
+ self Metric
+}
+
+// Init provides the SelfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *SelfCollector) Init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *SelfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *SelfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
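
A minimal custom Collector under the contract above, building a throw-away metric on each scrape via MustNewConstMetric; queueCollector and its length callback are illustrative, not part of the library:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector reports the current length of a hypothetical work queue. It
// holds no metric state; Collect creates a fresh constant metric per scrape.
type queueCollector struct {
	desc   *prometheus.Desc
	length func() int // supplied by the application
}

func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(c.length()))
}

func main() {
	prometheus.MustRegister(queueCollector{
		desc:   prometheus.NewDesc("myapp_queue_length", "Current queue length.", nil, nil),
		length: func() int { return 42 },
	})
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```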
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..a2952d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "hash/fnv"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.Init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.Init(result) // Init self-collection.
+ return result
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
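
A short usage sketch of the vector form, mirroring the shortcut documented on WithLabelValues; the metric and label names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "HTTP requests, partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func main() {
	prometheus.MustRegister(httpReqs)

	httpReqs.WithLabelValues("404", "GET").Inc()
	httpReqs.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(42)

	// The GetMetricWith* variants report errors (for example, a wrong
	// number of label values) instead of panicking.
	if _, err := httpReqs.GetMetricWithLabelValues("200"); err != nil {
		fmt.Println("lookup failed:", err)
	}
}
```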
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..fcde784
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,201 @@
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // variableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported at
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ h := fnv.New64a()
+ var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
+ for _, val := range labelValues {
+ b.Reset()
+ b.WriteString(val)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.id = h.Sum64()
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ h.Reset()
+ b.Reset()
+ b.WriteString(help)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ for _, labelName := range labelNames {
+ b.Reset()
+ b.WriteString(labelName)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.dimHash = h.Sum64()
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
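
A small sketch of constructing a Desc directly, as a custom Collector or Metric would; the names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("my_company", "indexer", "documents_indexed_total"),
		"The number of documents indexed.",
		[]string{"shard"},                   // variable labels: values vary per metric
		prometheus.Labels{"version": "1.2"}, // const labels: fixed in the Desc
	)
	fmt.Println(desc)
	// Desc{fqName: "my_company_indexer_documents_indexed_total", ...}
}
```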
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..425fe87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides embeddable metric primitives for servers and
+// standardized exposition of telemetry through a web services interface.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// To expose metrics registered with the Prometheus registry, an HTTP server
+// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+//
+// http.Handle("/metrics", prometheus.Handler())
+//
+// As a starting point a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// })
+// )
+//
+// func init() {
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.Inc()
+//
+// http.Handle("/metrics", prometheus.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter.
+// It also exports some stats about the HTTP usage of the /metrics
+// endpoint. (See the Handler function for more detail.)
+//
+// Two more advanced metric types are the Summary and Histogram.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// Those are all the parts needed for basic usage. Detailed documentation and
+// examples are provided below.
+//
+// Everything else this package offers is essentially for "power users" only. A
+// few pointers to "power user features":
+//
+// All the various ...Opts structs have a ConstLabels field for labels that
+// never change their value (which is only useful under special circumstances,
+// see documentation of the Opts type).
+//
+// The Untyped metric behaves like a Gauge, but signals the Prometheus server
+// not to assume anything about its type.
+//
+// Functions to fine-tune how the metric registry works: EnableCollectChecks,
+// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
+//
+// For custom metric collection, there are two entry points: Custom Metric
+// implementations and custom Collector implementations. A Metric is the
+// fundamental unit in the Prometheus data model: a sample at a point in time
+// together with its meta-data (like its fully-qualified name and any number of
+// pairs of label name and label value) that knows how to marshal itself into a
+// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
+// gets registered with the Prometheus registry and manages the collection of
+// one or more Metrics. Many parts of this package are building blocks for
+// Metrics and Collectors. Desc is the metric descriptor, actually used by all
+// metrics under the hood, and by Collectors to describe the Metrics to be
+// collected, but only to be dealt with by users if they implement their own
+// Metrics or Collectors. To create a Desc, the BuildFQName function will come
+// in handy. Other useful components for Metric and Collector implementation
+// include: LabelPairSorter to sort the DTO version of label pairs,
+// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
+// collection time, MetricVec to bundle custom Metrics into a metric vector
+// Collector, SelfCollector to make a custom Metric collect itself.
+//
+// A good example for a custom Collector is the ExpVarCollector included in this
+// package, which exports variables exported via the "expvar" package as
+// Prometheus metrics.
+package prometheus
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go
new file mode 100644
index 0000000..0f7630d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+// ExpvarCollector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the ExpvarCollector is inherently
+// slow. Thus, the ExpvarCollector is probably great for experiments and
+// prototyping, but you should seriously consider a more direct implementation of
+// Prometheus metrics for monitoring production systems.
+//
+// Use NewExpvarCollector to create new instances.
+type ExpvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
+// to be registered with the Prometheus registry.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
+ return &ExpvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
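
A sketch of the one-variable-label case described above: an expvar map whose keys become label values. The metric and endpoint names are made up:

```go
package main

import (
	"expvar"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	hits := expvar.NewMap("api_hits")
	hits.Add("/search", 3)
	hits.Add("/docs", 1)

	prometheus.MustRegister(prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		// One variable label, so the expvar value must be a map; its keys
		// become the values of the "endpoint" label.
		"api_hits": prometheus.NewDesc(
			"api_hits_total", "Hits per endpoint, proxied from expvar.",
			[]string{"endpoint"}, nil,
		),
	}))
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```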
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..ba8a402
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
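+
+// A minimal GaugeFunc sketch (editor's illustration): the callback runs on
+// every scrape, so it should be cheap and must be concurrency-safe.
+//
+//	prometheus.MustRegister(prometheus.NewGaugeFunc(
+//		prometheus.GaugeOpts{Name: "goroutines_count", Help: "Number of goroutines."},
+//		func() float64 { return float64(runtime.NumGoroutine()) },
+//	))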
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..8be2476
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
+func NewGoCollector() *goCollector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Namespace: "go",
+ Name: "goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained by system. Sum of all system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes_total"),
+ "Total number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ // LastGC is in nanoseconds since 1970; convert to seconds.
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
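+
+// A typical registration sketch (editor's illustration):
+//
+//	prometheus.MustRegister(prometheus.NewGoCollector())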
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..f98a41b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,450 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+var (
+ // DefBuckets are the default Histogram buckets. The default buckets are
+ // tailored to broadly measure the response time (in seconds) of a
+ // network service. Most likely, however, you will be required to define
+ // buckets customized to your use case.
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
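+
+// For example (editor's illustration), LinearBuckets(10, 5, 4) returns
+// []float64{10, 15, 20, 25}.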
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
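+
+// For example (editor's illustration), ExponentialBuckets(1, 2, 5) returns
+// []float64{1, 2, 4, 8, 16}.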
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+ // name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
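+
+// A minimal Histogram sketch (editor's illustration; the metric name and
+// bucket layout are hypothetical):
+//
+//	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
+//		Name:    "request_duration_seconds",
+//		Help:    "Request latency distribution.",
+//		Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
+//	})
+//	reqDur.Observe(0.42)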
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Now that the length of h.upperBounds is final, allocate the counts slice.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.Init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ SelfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
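+ // Add v to the running sum without a mutex: read the current bits,
+ // add v in float64 space, and compare-and-swap the new bits in,
+ // retrying if another goroutine updated the sum concurrently.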
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
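+
+// A minimal sketch for use inside a custom Collector's Collect method
+// (editor's illustration; desc and the numbers are hypothetical, and the
+// bucket counts are cumulative):
+//
+//	ch <- prometheus.MustNewConstHistogram(
+//		desc, 100, 12.3,
+//		map[float64]uint64{0.1: 50, 0.5: 90, 1: 100},
+//	)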
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 0000000..eabe602
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,361 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
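+
+// A minimal wiring sketch (editor's illustration; apiHandler is a
+// hypothetical http.Handler):
+//
+//	http.Handle("/api", prometheus.InstrumentHandler("api", apiHandler))
+//	http.Handle("/metrics", prometheus.Handler())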
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
+// flexibility (at the cost of a more complex call syntax). As
+// InstrumentHandler, this function registers four metric collectors, but it
+// uses the provided SummaryOpts to create them. However, the fields "Name" and
+// "Help" in the SummaryOpts are ignored. "Name" is replaced by
+// "requests_total", "request_duration_microseconds", "request_size_bytes", and
+// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
+// more flexibility (at the cost of a more complex call syntax). See
+// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := make(chan int)
+ urlLen := 0
+ if r.URL != nil {
+ urlLen = len(r.URL.String())
+ }
+ go computeApproximateRequestSize(r, out, urlLen)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..86fd81c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its metadata being exported to
+// Prometheus. Implementers of Metric in this package include Gauge, Counter,
+// Untyped, and Summary. Users can implement their own Metric types, but that
+// should be rarely needed. See the example for SelfCollector, which is also an
+// example for a user-implemented Metric.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Implementers of custom Metric types must observe concurrency safety
+ // as reads of this metric may occur at any time, and any blocking
+ // occurs at the expense of total performance of rendering all
+ // registered metrics. Ideally Metric implementations should support
+ // concurrent readers.
+ //
+ // The Prometheus client library attempts to minimize memory allocations
+ // and will provide a pre-existing reset dto.Metric pointer. Prometheus
+ // may recycle the dto.Metric proto message, so Metric implementations
+ // should just populate the provided dto.Metric and then should not keep
+ // any reference to it.
+ //
+ // While populating dto.Metric, labels must be sorted lexicographically.
+ // (Implementers may find LabelPairSorter useful for that.)
+ Write(*dto.Metric) error
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
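+
+// For example (editor's illustration), BuildFQName("http", "server", "requests_total")
+// returns "http_server_requests_total", and BuildFQName("", "", "up") returns "up".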
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..d8cf0ed
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including cpu, memory and file descriptor usage as well as
+// the process start time for the given process id under the given namespace.
+func NewProcessCollector(pid int, namespace string) *processCollector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
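+
+// A typical registration sketch (editor's illustration), exporting metrics
+// for the current process without a namespace prefix:
+//
+//	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))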
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) *processCollector {
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/push.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/push.go
new file mode 100644
index 0000000..1c33848
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/push.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by addr. See the Pushgateway
+// documentation for detailed implications of the job and instance
+// parameters. instance can be left empty. You can use just host:port or ip:port
+// as url, in which case 'http://' is added automatically. You can also include
+// the scheme in the URL. However, do not include the '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
+func Push(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "PUT")
+}
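+
+// A minimal push sketch (editor's illustration; the job name and Pushgateway
+// address are hypothetical):
+//
+//	if err := prometheus.Push("my_batch_job", "", "pushgateway:9091"); err != nil {
+//		log.Println("could not push to Pushgateway:", err)
+//	}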
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "POST", collectors...)
+}
+
+func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
+ r := newRegistry()
+ for _, collector := range collectors {
+ if _, err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return r.Push(job, instance, url, method)
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..5970aae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,726 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ defRegistry = newDefaultRegistry()
+ errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
+)
+
+// Constants relevant to the HTTP interface.
+const (
+ // APIVersion is the version of the format of the exported data. This
+ // will match this library's version, which subscribes to the Semantic
+ // Versioning scheme.
+ APIVersion = "0.0.4"
+
+ // DelimitedTelemetryContentType is the content type set on telemetry
+ // data responses in delimited protobuf format.
+ DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
+ // TextTelemetryContentType is the content type set on telemetry data
+ // responses in text format.
+ TextTelemetryContentType = `text/plain; version=` + APIVersion
+ // ProtoTextTelemetryContentType is the content type set on telemetry
+ // data responses in protobuf text format. (Only used for debugging.)
+ ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
+ // ProtoCompactTextTelemetryContentType is the content type set on
+ // telemetry data responses in protobuf compact text format. (Only used
+ // for debugging.)
+ ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
+
+ // Constants for object pools.
+ numBufs = 4
+ numMetricFamilies = 1000
+ numMetrics = 10000
+
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+
+ acceptEncodingHeader = "Accept-Encoding"
+ acceptHeader = "Accept"
+)
+
+// Handler returns the HTTP handler for the global Prometheus registry. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name). Usually the handler is used to handle the "/metrics" endpoint.
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", defRegistry)
+}
+
+// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
+// handler is not instrumented. This is useful if no instrumentation is desired
+// (for whatever reason) or if the instrumentation has to happen with a
+// different handler name (or with a different instrumentation approach
+// altogether). See the InstrumentHandler example.
+func UninstrumentedHandler() http.Handler {
+ return defRegistry
+}
+
+// Register registers a new Collector to be included in metrics collection. It
+// returns an error if the descriptors provided by the Collector are invalid or
+// if they - in combination with descriptors of already registered Collectors -
+// do not fulfill the consistency and uniqueness criteria described in the Desc
+// documentation.
+//
+// Do not register the same Collector multiple times concurrently. (Registering
+// the same Collector twice would result in an error anyway, but on top of that,
+// it is not safe to do so concurrently.)
+func Register(m Collector) error {
+ _, err := defRegistry.Register(m)
+ return err
+}
+
+// MustRegister works like Register but panics where Register would have
+// returned an error.
+func MustRegister(m Collector) {
+ err := Register(m)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// RegisterOrGet works like Register but does not return an error if a Collector
+// is registered that equals a previously registered Collector. (Two Collectors
+// are considered equal if their Describe method yields the same set of
+// descriptors.) Instead, the previously registered Collector is returned (which
+// is helpful if the new and previously registered Collectors are equal but not
+// identical, i.e. not pointers to the same object).
+//
+// As for Register, it is still not safe to call RegisterOrGet with the same
+// Collector multiple times concurrently.
+func RegisterOrGet(m Collector) (Collector, error) {
+ return defRegistry.RegisterOrGet(m)
+}
+
+// MustRegisterOrGet works like Register but panics where RegisterOrGet would
+// have returned an error.
+func MustRegisterOrGet(m Collector) Collector {
+ existing, err := RegisterOrGet(m)
+ if err != nil {
+ panic(err)
+ }
+ return existing
+}
+
+// Unregister unregisters the Collector that equals the Collector passed in as
+// an argument. (Two Collectors are considered equal if their Describe method
+// yields the same set of descriptors.) The function returns whether a Collector
+// was unregistered.
+func Unregister(c Collector) bool {
+ return defRegistry.Unregister(c)
+}
+
+// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
+// are collected. The hook function must be set before metrics collection begins
+// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
+// MetricFamily protobufs returned by the hook function are merged with the
+// metrics collected in the usual way.
+//
+// This is a way to directly inject MetricFamily protobufs managed and owned by
+// the caller. The caller has full responsibility. As no registration of the
+// injected metrics has happened, there is no descriptor to check against, and
+// there are no registration-time checks. If collect-time checks are disabled
+// (see function EnableCollectChecks), no sanity checks are performed on the
+// returned protobufs at all. If collect-checks are enabled, type and uniqueness
+// checks are performed, but no further consistency checks (which would require
+// knowledge of a metric descriptor).
+//
+// Sorting concerns: The caller is responsible for sorting the label pairs in
+// each metric. However, the order of metrics will be sorted by the registry as
+// it is required anyway after merging with the metric families collected
+// conventionally.
+//
+// The function must be callable at any time and concurrently.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ defRegistry.metricFamilyInjectionHook = hook
+}
+
+// PanicOnCollectError sets the behavior whether a panic is caused upon an error
+// while metrics are collected and served to the HTTP endpoint. By default, an
+// internal server error (status code 500) is served with an error message.
+func PanicOnCollectError(b bool) {
+ defRegistry.panicOnCollectError = b
+}
+
+// EnableCollectChecks enables (or disables) additional consistency checks
+// during metrics collection. These additional checks are not enabled by default
+// because they inflict a performance penalty and the errors they check for can
+// only happen if the used Metric and Collector types have internal programming
+// errors. It can be helpful to enable these checks while working with custom
+// Collectors or Metrics whose correctness is not well established yet.
+func EnableCollectChecks(b bool) {
+ defRegistry.collectChecksEnabled = b
+}
+
+// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
+// certain encoding. It returns the number of bytes written and any error
+// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
+// are encoders.
+type encoder func(io.Writer, *dto.MetricFamily) (int, error)
+
+type registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ bufPool chan *bytes.Buffer
+ metricFamilyPool chan *dto.MetricFamily
+ metricPool chan *dto.Metric
+ metricFamilyInjectionHook func() []*dto.MetricFamily
+
+ panicOnCollectError, collectChecksEnabled bool
+}
+
+func (r *registry) Register(c Collector) (Collector, error) {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ newDescIDs := map[uint64]struct{}{}
+ newDimHashesByName := map[string]uint64{}
+ var collectorID uint64 // Just a sum of all desc IDs.
+ var duplicateDescErr error
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return nil, errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return existing, errAlreadyReg
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return nil, duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return c, nil
+}
+
+func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
+ existing, err := r.Register(m)
+ if err != nil && err != errAlreadyReg {
+ return nil, err
+ }
+ return existing, nil
+}
+
+func (r *registry) Unregister(c Collector) bool {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ descIDs := map[uint64]struct{}{}
+ var collectorID uint64 // Just a sum of the desc IDs.
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
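+// Push pushes the current metrics in delimited protobuf format to a
+// Pushgateway, using the given HTTP method on a URL of the form
+// <pushURL>/metrics/jobs/<job>[/instances/<instance>]. A pushURL without a
+// scheme is assumed to be http. The Pushgateway is expected to answer with
+// status 202 (Accepted).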
+func (r *registry) Push(job, instance, pushURL, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
+ if instance != "" {
+ pushURL += "/instances/" + url.QueryEscape(instance)
+ }
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ return err
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 202 {
+ return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
+ }
+ return nil
+}
+
+func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ contentType := expfmt.Negotiate(req.Header)
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+}
+
+func (r *registry) writePB(encoder expfmt.Encoder) error {
+ var metricHashes map[uint64]struct{}
+ if r.collectChecksEnabled {
+ metricHashes = make(map[uint64]struct{})
+ }
+ metricChan := make(chan Metric, capMetricChan)
+ wg := sync.WaitGroup{}
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+	// (Collectors could be complex and slow, so we collect from them concurrently.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+		for range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+		// This could be done concurrently, too, but it would require locking
+		// of metricFamiliesByName (and of metricHashes if checks are
+		// enabled). Most likely not worth it.
+ desc := metric.Desc()
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if !ok {
+ metricFamily = r.getMetricFamily()
+ defer r.giveMetricFamily(metricFamily)
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ dtoMetric := r.getMetric()
+ defer r.giveMetric(dtoMetric)
+ if err := metric.Write(dtoMetric); err != nil {
+ // TODO: Consider different means of error reporting so
+ // that a single erroneous metric could be skipped
+ // instead of blowing up the whole collection.
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ switch {
+ case metricFamily.Type != nil:
+ // Type already set. We are good.
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+
+ if r.metricFamilyInjectionHook != nil {
+ for _, mf := range r.metricFamilyInjectionHook() {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if !exists {
+ metricFamiliesByName[mf.GetName()] = mf
+ if r.collectChecksEnabled {
+ for _, m := range mf.Metric {
+ if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+ }
+ for _, m := range mf.Metric {
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
+ return err
+ }
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+
+ // Now that MetricFamilies are all set, sort their Metrics
+ // lexicographically by their label values.
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+
+ // Write out MetricFamilies sorted by their name.
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name := range metricFamiliesByName {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ if err := encoder.Encode(metricFamiliesByName[name]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := fnv.New64a()
+ var buf bytes.Buffer
+ buf.WriteString(metricFamily.GetName())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check. Label pairs must be sorted by contract. But the point of this
+ // method is to check for contract violations. So we better do the sort
+ // now.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ buf.Reset()
+ buf.WriteString(lp.GetValue())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ }
+ metricHash := h.Sum64()
+ if _, exists := metricHashes[metricHash]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ metricHashes[metricHash] = struct{}{}
+
+ if desc == nil {
+ return nil // Nothing left to check if we have no desc.
+ }
+
+ // Desc consistency with metric family.
+ if metricFamily.GetName() != desc.fqName {
+ return fmt.Errorf(
+ "collected metric %s %s has name %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
+ )
+ }
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+
+ r.mtx.RLock() // Remaining checks need the read lock.
+ defer r.mtx.RUnlock()
+
+ // Is the desc registered?
+ if _, exist := r.descIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+
+ return nil
+}
+
+func (r *registry) getBuf() *bytes.Buffer {
+ select {
+ case buf := <-r.bufPool:
+ return buf
+ default:
+ return &bytes.Buffer{}
+ }
+}
+
+func (r *registry) giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ select {
+ case r.bufPool <- buf:
+ default:
+ }
+}
+
+func (r *registry) getMetricFamily() *dto.MetricFamily {
+ select {
+ case mf := <-r.metricFamilyPool:
+ return mf
+ default:
+ return &dto.MetricFamily{}
+ }
+}
+
+func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
+ mf.Reset()
+ select {
+ case r.metricFamilyPool <- mf:
+ default:
+ }
+}
+
+func (r *registry) getMetric() *dto.Metric {
+ select {
+ case m := <-r.metricPool:
+ return m
+ default:
+ return &dto.Metric{}
+ }
+}
+
+func (r *registry) giveMetric(m *dto.Metric) {
+ m.Reset()
+ select {
+ case r.metricPool <- m:
+ default:
+ }
+}
+
+func newRegistry() *registry {
+	return &registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ bufPool: make(chan *bytes.Buffer, numBufs),
+ metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
+ metricPool: make(chan *dto.Metric, numMetrics),
+ }
+}
+
+func newDefaultRegistry() *registry {
+ r := newRegistry()
+ r.Register(NewProcessCollector(os.Getpid(), ""))
+ r.Register(NewGoCollector())
+ return r
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+	// All label values are equal. Return false to satisfy the strict weak
+	// ordering required by sort.Interface (Less must be false for equal
+	// elements).
+	return false
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..fe81e00
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,540 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
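+//
+// A minimal usage sketch (hypothetical metric name and Help string):
+//
+//	requestDuration := NewSummary(SummaryOpts{
+//		Name: "request_duration_seconds",
+//		Help: "Duration of requests in seconds.",
+//	})
+//	MustRegister(requestDuration)
+//
+//	start := time.Now()
+//	// ... handle a request ...
+//	requestDuration.Observe(time.Since(start).Seconds())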
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
+
+var (
+ // DefObjectives are the default Summary quantile values.
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
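+	//
+	// For example, the default DefObjectives corresponds to
+	//
+	//	map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+	//
+	// i.e. the median with a rank error of ±0.05, the 90th percentile with
+	// ±0.01, and the 99th percentile with ±0.001.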
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
+
+// TODO: The sliding-window decay algorithm has a fundamental problem: the
+// Merge method of perks/quantile is not working as advertised - and it might
+// be unfixable, as the underlying algorithm is apparently not capable of
+// merging summaries in the first place. To avoid using Merge, we are currently
+// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. At scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: more
+// effort at observation time, less effort at scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption that merged the age buckets
+// efficiently at scrape time (see the code up to commit
+// 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Objectives) == 0 {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.Init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ SelfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
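+//
+// A minimal usage sketch (hypothetical metric and label names):
+//
+//	latencies := NewSummaryVec(SummaryOpts{
+//		Name: "http_request_duration_seconds",
+//		Help: "HTTP request latencies in seconds.",
+//	}, []string{"method", "code"})
+//	latencies.WithLabelValues("GET", "200").Observe(0.021)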
+type SummaryVec struct {
+ MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
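+//
+// A minimal sketch of use inside a custom Collector's Collect method
+// (hypothetical collector, counts, and quantile values; c.desc must have no
+// variable labels here, since no labelValues are passed):
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		s, err := NewConstSummary(c.desc, 4711, 403.42,
+//			map[float64]float64{0.5: 0.23, 0.99: 0.56})
+//		if err == nil {
+//			ch <- s
+//		}
+//	}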
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..c65ab1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
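+//
+// A minimal usage sketch (hypothetical metric name):
+//
+//	inFlight := NewUntyped(UntypedOpts{
+//		Name: "requests_in_flight",
+//		Help: "Requests currently being served.",
+//	})
+//	inFlight.Inc()
+//	defer inFlight.Dec()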
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+//	myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
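+//
+// A minimal sketch (hypothetical metric name; someQueue and its
+// concurrency-safe Len method are assumptions):
+//
+//	queueLength := NewUntypedFunc(UntypedOpts{
+//		Name: "queue_length",
+//		Help: "Current number of queued items.",
+//	}, func() float64 {
+//		return float64(someQueue.Len())
+//	})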
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..b54ac11
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
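+	// Lock-free add on a float64: load the current bits, compute the new
+	// value, and retry the compare-and-swap until no concurrent writer has
+	// changed the bits in between.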
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
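+//
+// A minimal sketch of use inside a custom Collector's Collect method
+// (hypothetical collector; assumes c.desc has exactly one variable label):
+//
+//	m, err := NewConstMetric(c.desc, GaugeValue, 42, "some-label-value")
+//	if err == nil {
+//		ch <- m
+//	}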
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..a1f3bdf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,247 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "hash"
+ "sync"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects not only children, but also hash and buf.
+ children map[uint64]Metric
+ desc *Desc
+
+ // hash is our own hash instance to avoid repeated allocations.
+ hash hash.Hash64
+ // buf is used to copy string contents into it for hashing,
+ // again to avoid allocations.
+ buf bytes.Buffer
+
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. The length of the returned slice
+// is always one.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metric := range m.children {
+ ch <- metric
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
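+//
+// A minimal sketch (hypothetical vector; CounterVec embeds MetricVec):
+//
+//	httpReqs := NewCounterVec(CounterOpts{
+//		Name: "http_requests_total",
+//		Help: "Total number of HTTP requests.",
+//	}, []string{"code", "method"})
+//	if c, err := httpReqs.GetMetricWithLabelValues("404", "GET"); err == nil {
+//		c.Inc()
+//	}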
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+ lvs := make([]string, len(labels))
+ for i, label := range m.desc.variableLabels {
+ lvs[i] = labels[label]
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+//	httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+//	httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, val := range vals {
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
+ metric, ok := m.children[hash]
+ if !ok {
+		// Copy labelValues inside this branch so that the copy is only
+		// allocated when a new metric is actually created.
+ copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
+ metric = m.newMetric(copiedLabelValues...)
+ m.children[hash] = metric
+ }
+ return metric
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_model/LICENSE b/Godeps/_workspace/src/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_model/NOTICE b/Godeps/_workspace/src/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go b/Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000..b065f86
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
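+
+// Illustrative sketch, appended for documentation only (ours, not part of
+// the generated code): building a counter family by hand. All scalar fields
+// are pointers, hence the proto.String/proto.Float64 helpers.
+func exampleCounterFamily() *MetricFamily {
+	return &MetricFamily{
+		Name: proto.String("http_requests_total"),
+		Help: proto.String("Total number of HTTP requests served."),
+		Type: MetricType_COUNTER.Enum(),
+		Metric: []*Metric{{
+			Label:   []*LabelPair{{Name: proto.String("code"), Value: proto.String("200")}},
+			Counter: &Counter{Value: proto.Float64(42)},
+		}},
+	}
+}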
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_model/ruby/LICENSE b/Godeps/_workspace/src/github.com/prometheus/client_model/ruby/LICENSE
new file mode 100644
index 0000000..11069ed
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/client_model/ruby/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..b72c9be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,411 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
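+// DecodeOptions contains options used by the Decoder and in sample extraction.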
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found FormatUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const (
+ textType = "text/plain"
+ jsonType = "application/json"
+ )
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+
+ case jsonType:
+ var prometheusAPIVersion string
+
+ if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
+ prometheusAPIVersion = params["version"]
+ } else {
+ prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
+ }
+
+ switch prometheusAPIVersion {
+ case "0.0.2", "":
+ return fmtJSON2
+ default:
+ return FmtUnknown
+ }
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ case fmtJSON2:
+ return newJSON2Decoder(r)
+ }
+ return &textDecoder{r: r}
+}
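+
+// A minimal usage sketch, not part of the upstream API: fetchFamilies is a
+// hypothetical helper that detects the wire format from the response header
+// and decodes metric families until io.EOF signals the end of the stream.
+func fetchFamilies(resp *http.Response) ([]*dto.MetricFamily, error) {
+	dec := NewDecoder(resp.Body, ResponseFormat(resp.Header))
+	var fams []*dto.MetricFamily
+	for {
+		mf := &dto.MetricFamily{}
+		switch err := dec.Decode(mf); err {
+		case nil:
+			fams = append(fams, mf)
+		case io.EOF:
+			return fams, nil
+		default:
+			return nil, err
+		}
+	}
+}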
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ return err
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
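+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.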
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
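+// Decode calls the Decode method of the wrapped Decoder and then extracts
+// the samples from the decoded MetricFamily into the provided model.Vector.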
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/encode.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..392ca90
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "bitbucket.org/ww/goautoneg"
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
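+
+// A minimal usage sketch, not part of the upstream API: writeFamilies is a
+// hypothetical HTTP handler helper that negotiates a format from the
+// request's Accept header and streams the given families to the client.
+func writeFamilies(w http.ResponseWriter, r *http.Request, fams []*dto.MetricFamily) error {
+	format := Negotiate(r.Header)
+	w.Header().Set(hdrContentType, string(format))
+	enc := NewEncoder(w, format)
+	for _, f := range fams {
+		if err := enc.Encode(f); err != nil {
+			return err
+		}
+	}
+	return nil
+}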
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/expfmt.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..366fbde
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
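+// Format specifies the HTTP content type of the different wire protocols.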
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = ``
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+ // fmtJSON2 is hidden as it is deprecated.
+ fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..14f9201
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/client_golang/text
+// go-fuzz -bin text-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
new file mode 100644
index 0000000..139597f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
@@ -0,0 +1,2 @@
+
+
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
new file mode 100644
index 0000000..2ae8706
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
@@ -0,0 +1,6 @@
+
+minimal_metric 1.234
+another_metric -3e3 103948
+# Even that:
+no_labels{} 3
+# HELP line for non-existing metric will be ignored.
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
new file mode 100644
index 0000000..5c351db
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
@@ -0,0 +1,12 @@
+
+# A normal comment.
+#
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
+# HELP name two-line\n doc str\\ing
+
+ # HELP name2 doc str"ing 2
+ # TYPE name2 gauge
+name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
+name2{ labelname = "val1" , }-Inf
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
new file mode 100644
index 0000000..0b3c345
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
@@ -0,0 +1,22 @@
+
+# TYPE my_summary summary
+my_summary{n1="val1",quantile="0.5"} 110
+decoy -1 -2
+my_summary{n1="val1",quantile="0.9"} 140 1
+my_summary_count{n1="val1"} 42
+# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2
+fake_sum{n1="val1"} 2001
+# TYPE another_summary summary
+another_summary_count{n2="val2",n1="val1"} 20
+my_summary_count{n2="val2",n1="val1"} 5 5
+another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
+my_summary_sum{n1="val2"} 08 15
+my_summary{n1="val3", quantile="0.2"} 4711
+ my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
+# some
+# funny comments
+# HELP
+# HELP
+# HELP my_summary
+# HELP my_summary
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
new file mode 100644
index 0000000..bde0a38
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
@@ -0,0 +1,10 @@
+
+# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
new file mode 100644
index 0000000..4c67f9a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
@@ -0,0 +1 @@
+bla 3.14
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
new file mode 100644
index 0000000..b853478
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
@@ -0,0 +1 @@
+metric{label="\t"} 3.14
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
new file mode 100644
index 0000000..b5fe5f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2 3
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
new file mode 100644
index 0000000..57c7fbc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
@@ -0,0 +1 @@
+metric{label="bla"} blubb
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
new file mode 100644
index 0000000..0a9df79
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
@@ -0,0 +1,3 @@
+
+# HELP metric one
+# HELP metric two
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
new file mode 100644
index 0000000..5bc7427
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
@@ -0,0 +1,3 @@
+
+# TYPE metric counter
+# TYPE metric untyped
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
new file mode 100644
index 0000000..a9a2426
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
@@ -0,0 +1,3 @@
+
+metric 4.12
+# TYPE metric counter
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
new file mode 100644
index 0000000..7e95ca8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
@@ -0,0 +1,2 @@
+
+# TYPE metric bla
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
new file mode 100644
index 0000000..7825f88
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
@@ -0,0 +1,2 @@
+
+# TYPE met-ric
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
new file mode 100644
index 0000000..8f35cae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
@@ -0,0 +1 @@
+@invalidmetric{label="bla"} 3.14 2
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
new file mode 100644
index 0000000..7ca2cc2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
@@ -0,0 +1 @@
+{label="bla"} 3.14 2
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
new file mode 100644
index 0000000..7a6ccc0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
@@ -0,0 +1,3 @@
+
+# TYPE metric histogram
+metric_bucket{le="bla"} 3.14
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
new file mode 100644
index 0000000..726d001
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
@@ -0,0 +1,3 @@
+
+metric{label="new
+line"} 3.14
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
new file mode 100644
index 0000000..6aa9e30
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
@@ -0,0 +1 @@
+metric{@="bla"} 3.14
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
new file mode 100644
index 0000000..d112cb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
@@ -0,0 +1 @@
+metric{__name__="bla"} 3.14
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
new file mode 100644
index 0000000..b34554a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
@@ -0,0 +1 @@
+metric{label+="bla"} 3.14
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
new file mode 100644
index 0000000..c4d7df3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
@@ -0,0 +1 @@
+metric{label=bla} 3.14
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
new file mode 100644
index 0000000..97eafc4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
@@ -0,0 +1,3 @@
+
+# TYPE metric summary
+metric{quantile="bla"} 3.14
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
new file mode 100644
index 0000000..fc70649
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
@@ -0,0 +1 @@
+metric{label="bla"+} 3.14
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
new file mode 100644
index 0000000..57b4879
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2.72
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
new file mode 100644
index 0000000..be1e6a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
@@ -0,0 +1 @@
+m{} 0
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/json_decode.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/json_decode.go
new file mode 100644
index 0000000..67e3a0d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/json_decode.go
@@ -0,0 +1,162 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
+)
+
+type json2Decoder struct {
+ dec *json.Decoder
+ fams []*dto.MetricFamily
+}
+
+func newJSON2Decoder(r io.Reader) Decoder {
+ return &json2Decoder{
+ dec: json.NewDecoder(r),
+ }
+}
+
+type histogram002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Values map[string]float64 `json:"value"`
+}
+
+type counter002 struct {
+ Labels model.LabelSet `json:"labels"`
+ Value float64 `json:"value"`
+}
+
+func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
+ labels := base.Clone().Merge(ext)
+ delete(labels, model.MetricNameLabel)
+
+ names := make([]string, 0, len(labels))
+ for ln := range labels {
+ names = append(names, string(ln))
+ }
+ sort.Strings(names)
+
+ pairs := make([]*dto.LabelPair, 0, len(labels))
+
+ for _, ln := range names {
+ lv := labels[model.LabelName(ln)]
+
+ pairs = append(pairs, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(string(lv)),
+ })
+ }
+
+ return pairs
+}
+
+func (d *json2Decoder) more() error {
+ var entities []struct {
+ BaseLabels model.LabelSet `json:"baseLabels"`
+ Docstring string `json:"docstring"`
+ Metric struct {
+ Type string `json:"type"`
+ Values json.RawMessage `json:"value"`
+ } `json:"metric"`
+ }
+
+ if err := d.dec.Decode(&entities); err != nil {
+ return err
+ }
+ for _, e := range entities {
+ f := &dto.MetricFamily{
+ Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
+ Help: proto.String(e.Docstring),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{},
+ }
+
+ d.fams = append(d.fams, f)
+
+ switch e.Metric.Type {
+ case "counter", "gauge":
+ var values []counter002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, ctr := range values {
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: protoLabelSet(e.BaseLabels, ctr.Labels),
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(ctr.Value),
+ },
+ })
+ }
+
+ case "histogram":
+ var values []histogram002
+
+ if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
+ return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
+ }
+
+ for _, hist := range values {
+ quants := make([]string, 0, len(hist.Values))
+ for q := range hist.Values {
+ quants = append(quants, q)
+ }
+
+ sort.Strings(quants)
+
+ for _, q := range quants {
+ value := hist.Values[q]
+ // The correct label is "quantile", but to avoid breaking old
+ // expressions this remains "percentile".
+ hist.Labels["percentile"] = model.LabelValue(q)
+
+ f.Metric = append(f.Metric, &dto.Metric{
+ Label: protoLabelSet(e.BaseLabels, hist.Labels),
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(value),
+ },
+ })
+ }
+ }
+
+ default:
+ return fmt.Errorf("unknown metric type %q", e.Metric.Type)
+ }
+ }
+ return nil
+}
+
+// Decode implements the Decoder interface.
+func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
+ if len(d.fams) == 0 {
+ if err := d.more(); err != nil {
+ return err
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
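
Since newJSON2Decoder is unexported, the following is a hedged, in-package sketch (e.g. for a scratch test file) of the 0.0.2 JSON shape that more() expects (derived from the struct tags above) and of draining the decoder one MetricFamily per Decode call. All names and values are illustrative.

package expfmt

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
)

func demoJSON2Decode() {
	// One entity in the legacy 0.0.2 format: baseLabels carries __name__,
	// docstring becomes HELP, and metric.value holds the per-metric samples.
	const input = `[{
		"baseLabels": {"__name__": "rpc_calls_total", "job": "batch"},
		"docstring": "RPC calls.",
		"metric": {
			"type": "counter",
			"value": [{"labels": {"service": "zed"}, "value": 25}]
		}
	}]`

	d := newJSON2Decoder(strings.NewReader(input))
	var mf dto.MetricFamily
	for {
		err := d.Decode(&mf)
		if err == io.EOF {
			break // input exhausted
		}
		if err != nil {
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println(mf.GetName(), len(mf.Metric)) // rpc_calls_total 1
	}
}
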
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..0bb9c14
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,305 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This function satisfies the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ result := bytes.NewBuffer(make([]byte, 0, len(v)))
+ for _, c := range v {
+ switch {
+ case c == '\\':
+ result.WriteString(`\\`)
+ case includeDoubleQuote && c == '"':
+ result.WriteString(`\"`)
+ case c == '\n':
+ result.WriteString(`\n`)
+ default:
+ result.WriteRune(c)
+ }
+ }
+ return result.String()
+}
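
As a usage sketch (metric name and values invented), this standalone program renders one hand-built counter family through MetricFamilyToText; the expected output is shown in the comment:

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(1027)},
		}},
	}
	// Prints:
	//   # HELP http_requests_total Total HTTP requests.
	//   # TYPE http_requests_total counter
	//   http_requests_total{code="200"} 1027
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
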
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..84433bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,746 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends on '_count' or
+ // '_sum' respectively and belongs to a summary/histogram, representing the sample
+ // count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate TextParser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
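
A minimal sketch of the parser's entry point, with invented input: a zero-value TextParser is ready to use, and TextToMetricFamilies returns the families keyed by metric name (a failed parse yields a ParseError carrying the offending line number).

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	const input = `# HELP api_errors_total Errors seen by the API.
# TYPE api_errors_total counter
api_errors_total{path="/load"} 24 1466210901000
`
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	mf := families["api_errors_total"]
	// The TYPE line set the type; the trailing integer became TimestampMs.
	fmt.Println(mf.GetType(), mf.Metric[0].Counter.GetValue()) // COUNTER 24
}
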
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/alert.go b/Godeps/_workspace/src/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..b027e9f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,109 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purposes of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(time.Now())
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
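
A short sketch of the helpers above (label values invented): an alert with a zero EndsAt is still firing, and setting EndsAt in the past resolves it.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{model.AlertNameLabel: "DiskFull"},
		StartsAt: time.Now().Add(-time.Hour),
	}
	fmt.Println(a.Name(), a.Status()) // DiskFull firing

	a.EndsAt = time.Now().Add(-time.Minute) // activity interval ended
	fmt.Println(a.Status())                 // resolved
}
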
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/fingerprinting.go b/Godeps/_workspace/src/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
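
A sketch of the set operations (fingerprint values invented): Intersection ranges over the smaller set and probes the larger one, so it costs O(min(len(s), len(o))).

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	o := model.FingerprintSet{2: {}, 3: {}, 4: {}}

	common := s.Intersection(o)
	fmt.Println(len(common), s.Equal(o)) // 2 false

	if _, ok := common[2]; ok {
		fmt.Println("fingerprint 2 is in both sets")
	}
}
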
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/labels.go b/Godeps/_workspace/src/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..6459c8f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,188 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
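
Two quick illustrations of this file (inputs invented): UnmarshalJSON rejects names that fail LabelNameRE, and sorting LabelPairs orders by name first, then value.

package main

import (
	"encoding/json"
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	var ln model.LabelName
	err := json.Unmarshal([]byte(`"0bad"`), &ln)
	fmt.Println(err) // "0bad" is not a valid label name (leading digit)

	pairs := model.LabelPairs{
		{Name: "job", Value: "api"},
		{Name: "instance", Value: "a"},
	}
	sort.Sort(pairs)
	fmt.Println(pairs[0].Name) // instance
}
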
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/labelset.go b/Godeps/_workspace/src/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..142b9d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,153 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !LabelNameRE.MatchString(string(ln)) {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
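
A sketch of the two central helpers (label names invented): Merge is non-destructive and lets 'other' win on conflicts, while Before yields a total order over label sets.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "env": "prod"}
	ext := model.LabelSet{"env": "staging"}

	merged := base.Merge(ext)
	fmt.Println(merged["env"], base["env"]) // staging prod (base is untouched)

	// Same size, so the first differing label decides: "prod" < "staging".
	fmt.Println(base.Before(merged)) // true
}
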
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/metric.go b/Godeps/_workspace/src/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..25fc3c9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,81 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+var separator = []byte{0}
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
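
And a one-line sketch of Metric's Stringer (labels invented): the __name__ label renders as the bare metric name, with the remaining labels sorted inside braces.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
		"code":                "200",
	}
	fmt.Println(m) // http_requests_total{code="200", method="GET"}
}
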
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/model.go b/Godeps/_workspace/src/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..88f013a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/signature.go b/Godeps/_workspace/src/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..28f3700
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,190 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "bytes"
+ "hash"
+ "hash/fnv"
+ "sort"
+ "sync"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = fnv.New64a().Sum64()
+
+ hashAndBufPool sync.Pool
+)
+
+type hashAndBuf struct {
+ h hash.Hash64
+ b bytes.Buffer
+}
+
+func getHashAndBuf() *hashAndBuf {
+ hb := hashAndBufPool.Get()
+ if hb == nil {
+ return &hashAndBuf{h: fnv.New64a()}
+ }
+ return hb.(*hashAndBuf)
+}
+
+func putHashAndBuf(hb *hashAndBuf) {
+ hb.h.Reset()
+ hb.b.Reset()
+ hashAndBufPool.Put(hb)
+}
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(labelName)
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(labels[labelName])
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(ls[labelName]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return Fingerprint(hb.h.Sum64())
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster and less allocation-heavy hash function, which is more susceptible
+// to hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for labelName, labelValue := range ls {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(labelValue))
+ hb.h.Write(hb.b.Bytes())
+ result ^= hb.h.Sum64()
+ hb.h.Reset()
+ hb.b.Reset()
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(m) == 0 || len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, label := range labels {
+ hb.b.WriteString(string(label))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(m[label]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(m[labelName]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
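
A short sketch of the signature helpers above (illustrative, assuming the vendored import path). Because label names are sorted before hashing, LabelsToSignature is independent of map iteration order, and SignatureWithoutLabels lets callers ignore volatile labels:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        labels := map[string]string{"job": "api", "instance": "a:9090"}
        fmt.Println(model.LabelsToSignature(labels)) // stable across runs

        // Ignore the "instance" label when computing the signature.
        m := model.Metric{"job": "api", "instance": "a:9090"}
        exclude := map[model.LabelName]struct{}{"instance": {}}
        fmt.Println(model.SignatureWithoutLabels(m, exclude))
    }
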
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/silence.go b/Godeps/_workspace/src/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..b4b96ea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes how to match the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
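
To illustrate the validation in Matcher.UnmarshalJSON above: regex values are compiled at decode time, so malformed patterns and empty matcher names are rejected before a Silence is ever used. An illustrative sketch:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/prometheus/common/model"
    )

    func main() {
        data := []byte(`{
            "matchers": [{"name": "job", "value": "api-.*", "isRegex": true}],
            "startsAt": "2015-01-01T00:00:00Z",
            "endsAt":   "2015-01-02T00:00:00Z",
            "createdBy": "ops",
            "comment":   "maintenance window"
        }`)

        var s model.Silence
        if err := json.Unmarshal(data, &s); err != nil {
            log.Fatal(err) // e.g. an invalid regex or empty matcher name
        }
        fmt.Println(s.Matchers[0].Name, "=~", s.Matchers[0].Value)
    }
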
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/time.go b/Godeps/_workspace/src/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..ebc8bf6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/time.go
@@ -0,0 +1,230 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ // minimumTick is the minimum supported time resolution. It must be at
+ // most time.Second (and divide it evenly) for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// ParseDuration parses a string into a Duration, assuming that a day always
+// has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ durSeconds, _ := strconv.Atoi(matches[1])
+ dur := time.Duration(durSeconds) * time.Second
+ unit := matches[2]
+ switch unit {
+ case "d":
+ dur *= 60 * 60 * 24
+ case "h":
+ dur *= 60 * 60
+ case "m":
+ dur *= 60
+ case "s":
+ dur *= 1
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
+
+func (d Duration) String() string {
+ seconds := int64(time.Duration(d) / time.Second)
+ factors := map[string]int64{
+ "d": 60 * 60 * 24,
+ "h": 60 * 60,
+ "m": 60,
+ "s": 1,
+ }
+ unit := "s"
+ switch int64(0) {
+ case seconds % factors["d"]:
+ unit = "d"
+ case seconds % factors["h"]:
+ unit = "h"
+ case seconds % factors["m"]:
+ unit = "m"
+ }
+ return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
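
A usage sketch for the Duration and Time helpers above. ParseDuration accepts one integer followed by one of d, h, m, or s (despite the broader character class in durationRE), and String() prints the largest unit that divides the value exactly:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/prometheus/common/model"
    )

    func main() {
        d, err := model.ParseDuration("90m")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(time.Duration(d)) // 1h30m0s
        fmt.Println(d)                // 90m (largest exactly-dividing unit)

        t := model.TimeFromUnix(0).Add(1500 * time.Millisecond)
        fmt.Println(t) // 1.5 (seconds, millisecond resolution)
    }
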
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/value.go b/Godeps/_workspace/src/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..10ffb0b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/common/model/value.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+func (v SampleValue) Equal(o SampleValue) bool {
+ return v == o
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+ if s.Value != o.Value {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of SamplePairs belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
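
An illustrative round trip for the JSON encoding above: SampleValue marshals as a quoted string, SamplePair as a [timestamp, value] array, and Sample nests both under its metric:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/prometheus/common/model"
    )

    func main() {
        s := model.Sample{
            Metric:    model.Metric{model.MetricNameLabel: "up", "job": "api"},
            Value:     1,
            Timestamp: model.TimeFromUnix(1418183276),
        }
        b, err := json.Marshal(s)
        if err != nil {
            log.Fatal(err)
        }
        // {"metric":{"__name__":"up","job":"api"},"value":[1418183276,"1"]}
        fmt.Println(string(b))
    }
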
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/AUTHORS.md b/Godeps/_workspace/src/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 0000000..6eb1935
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,11 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Tobias Schmidt
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..5705f0f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE b/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE b/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/README.md b/Godeps/_workspace/src/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..0edf496
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/README.md
@@ -0,0 +1,11 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Circle CI](https://circleci.com/gh/prometheus/procfs.svg?style=svg)](https://circleci.com/gh/prometheus/procfs)
+
+# Testing
+
+ $ go test
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go b/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..e2acd6d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline
new file mode 100644
index 0000000..d2d8ef8
Binary files /dev/null and b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline differ
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/4 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/4
new file mode 100644
index 0000000..e69de29
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits
new file mode 100644
index 0000000..23c6b68
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 62898 62898 processes
+Max open files 2048 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 62898 62898 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat
new file mode 100644
index 0000000..438aaa9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat
@@ -0,0 +1 @@
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat
new file mode 100644
index 0000000..65b9369
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat
@@ -0,0 +1,2 @@
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
+#!/bin/cat /proc/self/stat
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat
new file mode 100644
index 0000000..dabb96f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat
@@ -0,0 +1,16 @@
+cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 0
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go b/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..838474a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,36 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+func (fs FS) stat(p string) (os.FileInfo, error) {
+ return os.Stat(path.Join(string(fs), p))
+}
+
+func (fs FS) open(p string) (*os.File, error) {
+ return os.Open(path.Join(string(fs), p))
+}
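
A small sketch of FS in use. NewFS can equally be pointed at a fixtures directory (as the package's own tests do) instead of the default /proc mount:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS(procfs.DefaultMountPoint)
        if err != nil {
            log.Fatal(err) // not a directory, or unreadable
        }
        p, err := fs.NewProc(1) // PID 1 exists on virtually every Linux system
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("found process", p.PID)
    }
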
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..21445cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,149 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process.
+func Self() (Proc, error) {
+ return NewProc(os.Getpid())
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+
+ return fs.AllProcs()
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
+ return Proc{}, err
+ }
+
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := fs.open("")
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := p.open("cmdline")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := p.open("fd")
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) open(pa string) (*os.File, error) {
+ return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
+}
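
A usage sketch for the enumeration helpers above. Processes can exit between listing and inspection, so per-process errors are usually skipped rather than treated as fatal:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        procs, err := procfs.AllProcs()
        if err != nil {
            log.Fatal(err)
        }
        for _, p := range procs {
            cmdline, err := p.CmdLine()
            if err != nil {
                continue // the process likely exited in the meantime
            }
            fmt.Println(p.PID, cmdline)
        }
    }
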
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..9f080b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,111 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits of a process's resources, as read
+// from /proc/[pid]/limits.
+type ProcLimits struct {
+ CPUTime int
+ FileSize int
+ DataSize int
+ StackSize int
+ CoreFileSize int
+ ResidentSet int
+ Processes int
+ OpenFiles int
+ LockedMemory int
+ AddressSpace int
+ FileLocks int
+ PendingSignals int
+ MsqqueueSize int
+ NicePriority int
+ RealtimePriority int
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := p.open("limits")
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
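
A brief sketch showing how the "unlimited" sentinel surfaces to callers of NewLimits above; parseInt maps it to -1:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        p, err := procfs.Self()
        if err != nil {
            log.Fatal(err)
        }
        l, err := p.NewLimits()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("max open files:", l.OpenFiles) // -1 means unlimited
    }
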
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..1e02776
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,165 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// #include <unistd.h>
+import "C"
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, expressed in clock
+ // ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := p.open("stat")
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / ticks()), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / ticks()
+}
+
+func ticks() float64 {
+ return float64(C.sysconf(C._SC_CLK_TCK)) // most likely 100
+}
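
One subtlety in NewStat above is worth spelling out: the executable name in /proc/[pid]/stat is parenthesized and may itself contain spaces and parentheses (the fixtures/584/stat file exercises exactly this), so the code brackets the comm field with the first "(" and the last ")" instead of splitting on whitespace. A standalone sketch of the same trick:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        line := []byte("26231 (tmux: server) R 1 26231")

        l := bytes.Index(line, []byte("("))
        r := bytes.LastIndex(line, []byte(")"))
        fmt.Printf("comm=%q rest=%q\n", line[l+1:r], line[r+2:])
        // comm="tmux: server" rest="R 1 26231"
    }
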
diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go b/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..26fefb0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := fs.open("stat")
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
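The system-level counterpart can be read the same way; an illustrative fragment, assuming the same default /proc mount:

	stat, err := procfs.NewStat()
	if err != nil {
		// handle the error, e.g. /proc not mounted
	}
	fmt.Println("boot time (unix):", stat.BootTime)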
diff --git a/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE
new file mode 100644
index 0000000..81cf60d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2015 Matthew Heon
+Copyright (c) 2015 Paul Moore
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+- Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/README b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/README
new file mode 100644
index 0000000..64cab69
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/README
@@ -0,0 +1,26 @@
+libseccomp-golang: Go Language Bindings for the libseccomp Project
+===============================================================================
+https://github.com/seccomp/libseccomp-golang
+https://github.com/seccomp/libseccomp
+
+The libseccomp library provides an easy to use, platform independent, interface
+to the Linux Kernel's syscall filtering mechanism. The libseccomp API is
+designed to abstract away the underlying BPF based syscall filter language and
+present a more conventional function-call based filtering interface that should
+be familiar to, and easily adopted by, application developers.
+
+The libseccomp-golang library provides a Go based interface to the libseccomp
+library.
+
+* Online Resources
+
+The library source repository currently lives on GitHub at the following URLs:
+
+ -> https://github.com/seccomp/libseccomp-golang
+ -> https://github.com/seccomp/libseccomp
+
+The project mailing list is currently hosted on Google Groups at the URL below,
+please note that a Google account is not required to subscribe to the mailing
+list.
+
+ -> https://groups.google.com/d/forum/libseccomp
diff --git a/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp.go b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp.go
new file mode 100644
index 0000000..cebafdf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -0,0 +1,827 @@
+// +build linux
+
+// Public API specification for libseccomp Go bindings
+// Contains public API for the bindings
+
+// Package seccomp provides bindings for libseccomp, a library wrapping the Linux
+// seccomp syscall. Seccomp enables an application to restrict system call use
+// for itself and its children.
+package seccomp
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// C wrapping code
+
+// #cgo LDFLAGS: -lseccomp
+// #include <stdlib.h>
+// #include <seccomp.h>
+import "C"
+
+// Exported types
+
+// ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a
+// per-architecture basis.
+type ScmpArch uint
+
+// ScmpAction represents an action to be taken on a filter rule match in
+// libseccomp
+type ScmpAction uint
+
+// ScmpCompareOp represents a comparison operator which can be used in a filter
+// rule
+type ScmpCompareOp uint
+
+// ScmpCondition represents a rule in a libseccomp filter context
+type ScmpCondition struct {
+ Argument uint `json:"argument,omitempty"`
+ Op ScmpCompareOp `json:"operator,omitempty"`
+ Operand1 uint64 `json:"operand_one,omitempty"`
+ Operand2 uint64 `json:"operand_two,omitempty"`
+}
+
+// ScmpSyscall represents a Linux System Call
+type ScmpSyscall int32
+
+// Exported Constants
+
+const (
+ // Valid architectures recognized by libseccomp
+ // ARM64 and all MIPS architectures are unsupported by versions of the
+ // library before v2.2 and will return errors if used
+
+ // ArchInvalid is a placeholder to ensure uninitialized ScmpArch
+ // variables are invalid
+ ArchInvalid ScmpArch = iota
+ // ArchNative is the native architecture of the kernel
+ ArchNative ScmpArch = iota
+ // ArchX86 represents 32-bit x86 syscalls
+ ArchX86 ScmpArch = iota
+ // ArchAMD64 represents 64-bit x86-64 syscalls
+ ArchAMD64 ScmpArch = iota
+ // ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers)
+ ArchX32 ScmpArch = iota
+ // ArchARM represents 32-bit ARM syscalls
+ ArchARM ScmpArch = iota
+ // ArchARM64 represents 64-bit ARM syscalls
+ ArchARM64 ScmpArch = iota
+ // ArchMIPS represents 32-bit MIPS syscalls
+ ArchMIPS ScmpArch = iota
+ // ArchMIPS64 represents 64-bit MIPS syscalls
+ ArchMIPS64 ScmpArch = iota
+ // ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers)
+ ArchMIPS64N32 ScmpArch = iota
+ // ArchMIPSEL represents 32-bit MIPS syscalls (little endian)
+ ArchMIPSEL ScmpArch = iota
+ // ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian)
+ ArchMIPSEL64 ScmpArch = iota
+ // ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian,
+ // 32-bit pointers)
+ ArchMIPSEL64N32 ScmpArch = iota
+)
+
+const (
+ // Supported actions on filter match
+
+ // ActInvalid is a placeholder to ensure uninitialized ScmpAction
+ // variables are invalid
+ ActInvalid ScmpAction = iota
+ // ActKill kills the process
+ ActKill ScmpAction = iota
+ // ActTrap throws SIGSYS
+ ActTrap ScmpAction = iota
+ // ActErrno causes the syscall to return a negative error code. This
+ // code can be set with the SetReturnCode method
+ ActErrno ScmpAction = iota
+ // ActTrace causes the syscall to notify tracing processes with the
+ // given error code. This code can be set with the SetReturnCode method
+ ActTrace ScmpAction = iota
+ // ActAllow permits the syscall to continue execution
+ ActAllow ScmpAction = iota
+)
+
+const (
+ // These are comparison operators used in conditional seccomp rules
+ // They are used to compare the value of a single argument of a syscall
+ // against a user-defined constant
+
+ // CompareInvalid is a placeholder to ensure uninitialized ScmpCompareOp
+ // variables are invalid
+ CompareInvalid ScmpCompareOp = iota
+ // CompareNotEqual returns true if the argument is not equal to the
+ // given value
+ CompareNotEqual ScmpCompareOp = iota
+ // CompareLess returns true if the argument is less than the given value
+ CompareLess ScmpCompareOp = iota
+ // CompareLessOrEqual returns true if the argument is less than or equal
+ // to the given value
+ CompareLessOrEqual ScmpCompareOp = iota
+ // CompareEqual returns true if the argument is equal to the given value
+ CompareEqual ScmpCompareOp = iota
+ // CompareGreaterEqual returns true if the argument is greater than or
+ // equal to the given value
+ CompareGreaterEqual ScmpCompareOp = iota
+ // CompareGreater returns true if the argument is greater than the given
+ // value
+ CompareGreater ScmpCompareOp = iota
+ // CompareMaskedEqual returns true if the argument is equal to the given
+ // value, when masked (bitwise &) against the second given value
+ CompareMaskedEqual ScmpCompareOp = iota
+)
+
+// Helpers for types
+
+// GetArchFromString returns an ScmpArch constant from a string representing an
+// architecture
+func GetArchFromString(arch string) (ScmpArch, error) {
+ switch strings.ToLower(arch) {
+ case "x86":
+ return ArchX86, nil
+ case "amd64", "x86-64", "x86_64", "x64":
+ return ArchAMD64, nil
+ case "x32":
+ return ArchX32, nil
+ case "arm":
+ return ArchARM, nil
+ case "arm64", "aarch64":
+ return ArchARM64, nil
+ case "mips":
+ return ArchMIPS, nil
+ case "mips64":
+ return ArchMIPS64, nil
+ case "mips64n32":
+ return ArchMIPS64N32, nil
+ case "mipsel":
+ return ArchMIPSEL, nil
+ case "mipsel64":
+ return ArchMIPSEL64, nil
+ case "mipsel64n32":
+ return ArchMIPSEL64N32, nil
+ default:
+ return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %s", arch)
+ }
+}
+
+// String returns a string representation of an architecture constant
+func (a ScmpArch) String() string {
+ switch a {
+ case ArchX86:
+ return "x86"
+ case ArchAMD64:
+ return "amd64"
+ case ArchX32:
+ return "x32"
+ case ArchARM:
+ return "arm"
+ case ArchARM64:
+ return "arm64"
+ case ArchMIPS:
+ return "mips"
+ case ArchMIPS64:
+ return "mips64"
+ case ArchMIPS64N32:
+ return "mips64n32"
+ case ArchMIPSEL:
+ return "mipsel"
+ case ArchMIPSEL64:
+ return "mipsel64"
+ case ArchMIPSEL64N32:
+ return "mipsel64n32"
+ case ArchNative:
+ return "native"
+ case ArchInvalid:
+ return "Invalid architecture"
+ default:
+ return "Unknown architecture"
+ }
+}
+
+// String returns a string representation of a comparison operator constant
+func (a ScmpCompareOp) String() string {
+ switch a {
+ case CompareNotEqual:
+ return "Not equal"
+ case CompareLess:
+ return "Less than"
+ case CompareLessOrEqual:
+ return "Less than or equal to"
+ case CompareEqual:
+ return "Equal"
+ case CompareGreaterEqual:
+ return "Greater than or equal to"
+ case CompareGreater:
+ return "Greater than"
+ case CompareMaskedEqual:
+ return "Masked equality"
+ case CompareInvalid:
+ return "Invalid comparison operator"
+ default:
+ return "Unrecognized comparison operator"
+ }
+}
+
+// String returns a string representation of a seccomp match action
+func (a ScmpAction) String() string {
+ switch a & 0xFFFF {
+ case ActKill:
+ return "Action: Kill Process"
+ case ActTrap:
+ return "Action: Send SIGSYS"
+ case ActErrno:
+ return fmt.Sprintf("Action: Return error code %d", (a >> 16))
+ case ActTrace:
+ return fmt.Sprintf("Action: Notify tracing processes with code %d",
+ (a >> 16))
+ case ActAllow:
+ return "Action: Allow system call"
+ default:
+ return "Unrecognized Action"
+ }
+}
+
+// SetReturnCode adds a return code to a supporting ScmpAction, clearing any
+// existing code. Only valid on ActErrno and ActTrace; takes no action otherwise.
+// Accepts a 16-bit return code as an argument.
+// Returns a valid ScmpAction of the original type with the new error code set.
+func (a ScmpAction) SetReturnCode(code int16) ScmpAction {
+ aTmp := a & 0x0000FFFF
+ if aTmp == ActErrno || aTmp == ActTrace {
+ return (aTmp | (ScmpAction(code)&0xFFFF)<<16)
+ }
+ return a
+}
+
+// GetReturnCode returns the return code of an ScmpAction
+func (a ScmpAction) GetReturnCode() int16 {
+ return int16(a >> 16)
+}
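+
+// Illustrative example (not in the upstream source): the action constant
+// occupies the low 16 bits and the return code the high 16 bits, so a
+// set/get round-trip preserves the code:
+//
+//	act := ActErrno.SetReturnCode(int16(syscall.EPERM))
+//	act.GetReturnCode() // == int16(syscall.EPERM)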
+
+// General utility functions
+
+// GetLibraryVersion returns the version of the library the bindings are built
+// against.
+// The version is formatted as follows: Major.Minor.Micro
+func GetLibraryVersion() (major, minor, micro int) {
+ return verMajor, verMinor, verMicro
+}
+
+// Syscall functions
+
+// GetName retrieves the name of a syscall from its number.
+// Acts on any syscall number.
+// Returns either a string containing the name of the syscall, or an error.
+func (s ScmpSyscall) GetName() (string, error) {
+ return s.GetNameByArch(ArchNative)
+}
+
+// GetNameByArch retrieves the name of a syscall from its number for a given
+// architecture.
+// Acts on any syscall number.
+// Accepts a valid architecture constant.
+// Returns either a string containing the name of the syscall, or an error
+// if the syscall is unrecognized or an issue occurred.
+func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) {
+ if err := sanitizeArch(arch); err != nil {
+ return "", err
+ }
+
+ cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s))
+ if cString == nil {
+ return "", fmt.Errorf("could not resolve syscall name")
+ }
+ defer C.free(unsafe.Pointer(cString))
+
+ finalStr := C.GoString(cString)
+ return finalStr, nil
+}
+
+// GetSyscallFromName returns the number of a syscall by name on the kernel's
+// native architecture.
+// Accepts a string containing the name of a syscall.
+// Returns the number of the syscall, or an error if no syscall with that name
+// was found.
+func GetSyscallFromName(name string) (ScmpSyscall, error) {
+ cString := C.CString(name)
+ defer C.free(unsafe.Pointer(cString))
+
+ result := C.seccomp_syscall_resolve_name(cString)
+ if result == scmpError {
+ return 0, fmt.Errorf("could not resolve name to syscall")
+ }
+
+ return ScmpSyscall(result), nil
+}
+
+// GetSyscallFromNameByArch returns the number of a syscall by name for a given
+// architecture's ABI.
+// Accepts the name of a syscall and an architecture constant.
+// Returns the number of the syscall, or an error if an invalid architecture is
+// passed or a syscall with that name was not found.
+func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) {
+ if err := sanitizeArch(arch); err != nil {
+ return 0, err
+ }
+
+ cString := C.CString(name)
+ defer C.free(unsafe.Pointer(cString))
+
+ result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString)
+ if result == scmpError {
+ return 0, fmt.Errorf("could not resolve name to syscall")
+ }
+
+ return ScmpSyscall(result), nil
+}
+
+// MakeCondition creates and returns a new condition to attach to a filter rule.
+// Associated rules will only match if this condition is true.
+// Accepts the number of the argument we are checking, and a comparison operator
+// and value to compare to.
+// The rule will match if argument $arg (zero-indexed) of the syscall is
+// $COMPARE_OP the provided comparison value.
+// Some comparison operators accept two values. Masked equals, for example,
+// will mask $arg of the syscall with the second value provided (via bitwise
+// AND) and then compare against the first value provided.
+// For example, in the less than or equal case, if the syscall argument was
+// 0 and the value provided was 1, the condition would match, as 0 is less
+// than or equal to 1.
+// Returns either an error on bad arguments or a valid ScmpCondition struct.
+func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCondition, error) {
+ var condStruct ScmpCondition
+
+ if comparison == CompareInvalid {
+ return condStruct, fmt.Errorf("invalid comparison operator")
+ } else if arg > 5 {
+ return condStruct, fmt.Errorf("syscalls only have up to 6 arguments")
+ } else if len(values) > 2 {
+ return condStruct, fmt.Errorf("conditions can have at most 2 arguments")
+ } else if len(values) == 0 {
+ return condStruct, fmt.Errorf("must provide at least one value to compare against")
+ }
+
+ condStruct.Argument = arg
+ condStruct.Op = comparison
+ condStruct.Operand1 = values[0]
+ if len(values) == 2 {
+ condStruct.Operand2 = values[1]
+ } else {
+ condStruct.Operand2 = 0 // Unused
+ }
+
+ return condStruct, nil
+}
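+
+// Illustrative example (not in the upstream source): match when syscall
+// argument 1 equals the value 2:
+//
+//	cond, err := MakeCondition(1, CompareEqual, 2)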
+
+// Utility Functions
+
+// GetNativeArch returns architecture token representing the native kernel
+// architecture
+func GetNativeArch() (ScmpArch, error) {
+ arch := C.seccomp_arch_native()
+
+ return archFromNative(arch)
+}
+
+// Public Filter API
+
+// ScmpFilter represents a filter context in libseccomp.
+// A filter context is initially empty. Rules can be added to it, and it can
+// then be loaded into the kernel.
+type ScmpFilter struct {
+ filterCtx C.scmp_filter_ctx
+ valid bool
+ lock sync.Mutex
+}
+
+// NewFilter creates and returns a new filter context.
+// Accepts a default action to be taken for syscalls which match no rules in
+// the filter.
+// Returns a reference to a valid filter context, or nil and an error if the
+// filter context could not be created or an invalid default action was given.
+func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) {
+ if err := sanitizeAction(defaultAction); err != nil {
+ return nil, err
+ }
+
+ fPtr := C.seccomp_init(defaultAction.toNative())
+ if fPtr == nil {
+ return nil, fmt.Errorf("could not create filter")
+ }
+
+ filter := new(ScmpFilter)
+ filter.filterCtx = fPtr
+ filter.valid = true
+ runtime.SetFinalizer(filter, filterFinalizer)
+
+ return filter, nil
+}
+
+// IsValid determines whether a filter context is valid to use.
+// Some operations (Release and Merge) render filter contexts invalid and
+// consequently prevent further use.
+func (f *ScmpFilter) IsValid() bool {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ return f.valid
+}
+
+// Reset resets a filter context, removing all its existing state.
+// Accepts a new default action to be taken for syscalls which do not match.
+// Returns an error if the filter or action provided are invalid.
+func (f *ScmpFilter) Reset(defaultAction ScmpAction) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeAction(defaultAction); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ retCode := C.seccomp_reset(f.filterCtx, defaultAction.toNative())
+ if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Release releases a filter context, freeing its memory. Should be called after
+// loading into the kernel, when the filter is no longer needed.
+// After calling this function, the given filter is no longer valid and cannot
+// be used.
+// Release() will be invoked automatically when a filter context is garbage
+// collected, but can also be called manually to free memory.
+func (f *ScmpFilter) Release() {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return
+ }
+
+ f.valid = false
+ C.seccomp_release(f.filterCtx)
+}
+
+// Merge merges two filter contexts.
+// The source filter src will be released as part of the process, and will no
+// longer be usable or valid after this call.
+// To be merged, filters must NOT share any architectures, and all their
+// attributes (Default Action, Bad Arch Action, No New Privs and TSync bools)
+// must match.
+// The filter src will be merged into the filter this is called on.
+// The architectures of the src filter not present in the destination, and all
+// associated rules, will be added to the destination.
+// Returns an error if merging the filters failed.
+func (f *ScmpFilter) Merge(src *ScmpFilter) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ src.lock.Lock()
+ defer src.lock.Unlock()
+
+ if !src.valid || !f.valid {
+ return fmt.Errorf("one or more of the filter contexts is invalid or uninitialized")
+ }
+
+ // Merge the filters
+ retCode := C.seccomp_merge(f.filterCtx, src.filterCtx)
+ if syscall.Errno(-1*retCode) == syscall.EINVAL {
+ return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter")
+ } else if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ src.valid = false
+
+ return nil
+}
+
+// IsArchPresent checks if an architecture is present in a filter.
+// If a filter contains an architecture, it uses its default action for
+// syscalls which do not match rules in it, and its rules can match syscalls
+// for that ABI.
+// If a filter does not contain an architecture, all syscalls made to that
+// kernel ABI will fail with the filter's default Bad Architecture Action
+// (by default, killing the process).
+// Accepts an architecture constant.
+// Returns true if the architecture is present in the filter, false otherwise,
+// and an error on an invalid filter context, architecture constant, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) IsArchPresent(arch ScmpArch) (bool, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return false, err
+ } else if !f.valid {
+ return false, errBadFilter
+ }
+
+ retCode := C.seccomp_arch_exist(f.filterCtx, arch.toNative())
+ if syscall.Errno(-1*retCode) == syscall.EEXIST {
+ // -EEXIST is "arch not present"
+ return false, nil
+ } else if retCode != 0 {
+ return false, syscall.Errno(-1 * retCode)
+ }
+
+ return true, nil
+}
+
+// AddArch adds an architecture to the filter.
+// Accepts an architecture constant.
+// Returns an error on invalid filter context or architecture token, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) AddArch(arch ScmpArch) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ // Libseccomp returns -EEXIST if the specified architecture is already
+ // present. Succeed silently in this case, as it's not fatal, and the
+ // architecture is present already.
+ retCode := C.seccomp_arch_add(f.filterCtx, arch.toNative())
+ if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// RemoveArch removes an architecture from the filter.
+// Accepts an architecture constant.
+// Returns an error on invalid filter context or architecture token, or an
+// issue with the call to libseccomp.
+func (f *ScmpFilter) RemoveArch(arch ScmpArch) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if err := sanitizeArch(arch); err != nil {
+ return err
+ } else if !f.valid {
+ return errBadFilter
+ }
+
+ // Similar to AddArch, -EEXIST is returned if the arch is not present.
+ // Succeed silently in that case; this is not fatal, and the architecture
+ // is not present in the filter after RemoveArch.
+ retCode := C.seccomp_arch_remove(f.filterCtx, arch.toNative())
+ if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Load loads a filter context into the kernel.
+// Returns an error if the filter context is invalid or the syscall failed.
+func (f *ScmpFilter) Load() error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_load(f.filterCtx); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// GetDefaultAction returns the default action taken on a syscall which does not
+// match a rule in the filter, or an error if an issue was encountered
+// retrieving the value.
+func (f *ScmpFilter) GetDefaultAction() (ScmpAction, error) {
+ action, err := f.getFilterAttr(filterAttrActDefault)
+ if err != nil {
+ return 0x0, err
+ }
+
+ return actionFromNative(action)
+}
+
+// GetBadArchAction returns the default action taken on a syscall for an
+// architecture not in the filter, or an error if an issue was encountered
+// retrieving the value.
+func (f *ScmpFilter) GetBadArchAction() (ScmpAction, error) {
+ action, err := f.getFilterAttr(filterAttrActBadArch)
+ if err != nil {
+ return 0x0, err
+ }
+
+ return actionFromNative(action)
+}
+
+// GetNoNewPrivsBit returns the state to which the No New Privileges bit will
+// be set when the filter is loaded, or an error if an issue was encountered
+// retrieving the value.
+// The No New Privileges bit tells the kernel that new processes run with exec()
+// cannot gain more privileges than the process that ran exec().
+// For example, a process with No New Privileges set would be unable to exec
+// setuid/setgid executables.
+func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
+ noNewPrivs, err := f.getFilterAttr(filterAttrNNP)
+ if err != nil {
+ return false, err
+ }
+
+ if noNewPrivs == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// GetTsyncBit returns whether Thread Synchronization will be enabled on the
+// filter being loaded, or an error if an issue was encountered retrieving the
+// value.
+// Thread Sync ensures that all members of the thread group of the calling
+// process will share the same Seccomp filter set.
+// Tsync is a fairly recent addition to the Linux kernel and older kernels
+// lack support. If the running kernel does not support Tsync and it is
+// requested in a filter, Libseccomp will not enable TSync support and will
+// proceed as normal.
+// This function is unavailable before v2.2 of libseccomp and will return an
+// error.
+func (f *ScmpFilter) GetTsyncBit() (bool, error) {
+ tSync, err := f.getFilterAttr(filterAttrTsync)
+ if err != nil {
+ return false, err
+ }
+
+ if tSync == 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// SetBadArchAction sets the default action taken on a syscall for an
+// architecture not in the filter.
+// Returns an error if an issue was encountered setting the value.
+func (f *ScmpFilter) SetBadArchAction(action ScmpAction) error {
+ if err := sanitizeAction(action); err != nil {
+ return err
+ }
+
+ return f.setFilterAttr(filterAttrActBadArch, action.toNative())
+}
+
+// SetNoNewPrivsBit sets the state of the No New Privileges bit, which will be
+// applied on filter load.
+// Returns an error if an issue was encountered setting the value.
+// Filters with No New Privileges set to 0 can only be loaded if the process
+// has the CAP_SYS_ADMIN capability.
+func (f *ScmpFilter) SetNoNewPrivsBit(state bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if state {
+ toSet = 0x1
+ }
+
+ return f.setFilterAttr(filterAttrNNP, toSet)
+}
+
+// SetTsync sets whether Thread Synchronization will be enabled on the filter
+// being loaded. Returns an error if setting Tsync failed, or the filter is
+// invalid.
+// Thread Sync ensures that all members of the thread group of the calling
+// process will share the same Seccomp filter set.
+// Tsync is a fairly recent addition to the Linux kernel and older kernels
+// lack support. If the running kernel does not support Tsync and it is
+// requested in a filter, Libseccomp will not enable TSync support and will
+// proceed as normal.
+// This function is unavailable before v2.2 of libseccomp and will return an
+// error.
+func (f *ScmpFilter) SetTsync(enable bool) error {
+ var toSet C.uint32_t = 0x0
+
+ if enable {
+ toSet = 0x1
+ }
+
+ return f.setFilterAttr(filterAttrTsync, toSet)
+}
+
+// SetSyscallPriority sets a syscall's priority.
+// This provides a hint to the filter generator in libseccomp about the
+// importance of this syscall. High-priority syscalls are placed
+// first in the filter code, and incur less overhead (at the expense of
+// lower-priority syscalls).
+func (f *ScmpFilter) SetSyscallPriority(call ScmpSyscall, priority uint8) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_syscall_priority(f.filterCtx, C.int(call),
+ C.uint8_t(priority)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// AddRule adds a single rule for an unconditional action on a syscall.
+// Accepts the number of the syscall and the action to be taken on the call
+// being made.
+// Returns an error if an issue was encountered adding the rule.
+func (f *ScmpFilter) AddRule(call ScmpSyscall, action ScmpAction) error {
+ return f.addRuleGeneric(call, action, false, nil)
+}
+
+// AddRuleExact adds a single rule for an unconditional action on a syscall.
+// Accepts the number of the syscall and the action to be taken on the call
+// being made.
+// No modifications will be made to the rule, and it will fail to add if it
+// cannot be applied to the current architecture without modification.
+// The rule will function exactly as described, but it may not function
+// identically on (or even be applicable to) all architectures.
+// Returns an error if an issue was encountered adding the rule.
+func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error {
+ return f.addRuleGeneric(call, action, true, nil)
+}
+
+// AddRuleConditional adds a single rule for a conditional action on a syscall.
+// Returns an error if an issue was encountered adding the rule.
+// All conditions must match for the rule to match.
+// There is a bug in library versions below v2.2.1 which can, in some cases,
+// cause conditions to be lost when more than one are used. Consequently,
+// AddRuleConditional is disabled on library versions lower than v2.2.1
+func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
+ return f.addRuleGeneric(call, action, false, conds)
+}
+
+// AddRuleConditionalExact adds a single rule for a conditional action on a
+// syscall.
+// No modifications will be made to the rule, and it will fail to add if it
+// cannot be applied to the current architecture without modification.
+// The rule will function exactly as described, but it may not function
+// identically on (or even be applicable to) all architectures.
+// Returns an error if an issue was encountered adding the rule.
+// There is a bug in library versions below v2.2.1 which can, in some cases,
+// cause conditions to be lost when more than one are used. Consequently,
+// AddRuleConditionalExact is disabled on library versions lower than v2.2.1
+func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error {
+ return f.addRuleGeneric(call, action, true, conds)
+}
+
+// ExportPFC outputs a PFC-formatted, human-readable dump of a filter context's
+// rules to a file.
+// Accepts file to write to (must be open for writing).
+// Returns an error if writing to the file fails.
+func (f *ScmpFilter) ExportPFC(file *os.File) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ fd := file.Fd()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_export_pfc(f.filterCtx, C.int(fd)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// ExportBPF outputs a Berkeley Packet Filter-formatted, kernel-readable dump of a
+// filter context's rules to a file.
+// Accepts file to write to (must be open for writing).
+// Returns an error if writing to the file fails.
+func (f *ScmpFilter) ExportBPF(file *os.File) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ fd := file.Fd()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if retCode := C.seccomp_export_bpf(f.filterCtx, C.int(fd)); retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
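Taken together, the public API above composes into a small allow-list filter. An illustrative sketch (assuming a seccomp-capable kernel and libseccomp >= 2.2.1 for the conditional rule; the syscall names and the dup2 condition are arbitrary examples, not a recommended policy):

package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Kill the process on any syscall that matches no rule.
	filter, err := seccomp.NewFilter(seccomp.ActKill)
	if err != nil {
		log.Fatal(err)
	}
	defer filter.Release()

	// Unconditionally allow a minimal set of syscalls.
	for _, name := range []string{"read", "write", "exit_group"} {
		call, err := seccomp.GetSyscallFromName(name)
		if err != nil {
			log.Fatal(err)
		}
		if err := filter.AddRule(call, seccomp.ActAllow); err != nil {
			log.Fatal(err)
		}
	}

	// Allow dup2 only when argument 1 (the new fd) equals 2 (stderr).
	dup2, err := seccomp.GetSyscallFromName("dup2")
	if err != nil {
		log.Fatal(err)
	}
	cond, err := seccomp.MakeCondition(1, seccomp.CompareEqual, 2)
	if err != nil {
		log.Fatal(err)
	}
	err = filter.AddRuleConditional(dup2, seccomp.ActAllow, []seccomp.ScmpCondition{cond})
	if err != nil {
		log.Fatal(err)
	}

	// Load the filter into the kernel; from here on, only the allowed
	// syscalls may be made by this process.
	if err := filter.Load(); err != nil {
		log.Fatal(err)
	}
}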
diff --git a/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp_internal.go
new file mode 100644
index 0000000..306ed17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -0,0 +1,461 @@
+// +build linux
+
+// Internal functions for libseccomp Go bindings
+// No exported functions
+
+package seccomp
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// Unexported C wrapping code - provides the C-Golang interface
+// Get the seccomp header in scope
+// Need stdlib.h for free() on cstrings
+
+// #cgo LDFLAGS: -lseccomp
+/*
+#include <stdlib.h>
+#include <seccomp.h>
+
+#if SCMP_VER_MAJOR < 2
+#error Minimum supported version of Libseccomp is v2.1.0
+#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 1
+#error Minimum supported version of Libseccomp is v2.1.0
+#endif
+
+#define ARCH_BAD ~0
+
+const uint32_t C_ARCH_BAD = ARCH_BAD;
+
+#ifndef SCMP_ARCH_AARCH64
+#define SCMP_ARCH_AARCH64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS
+#define SCMP_ARCH_MIPS ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS64
+#define SCMP_ARCH_MIPS64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPS64N32
+#define SCMP_ARCH_MIPS64N32 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL
+#define SCMP_ARCH_MIPSEL ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL64
+#define SCMP_ARCH_MIPSEL64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_MIPSEL64N32
+#define SCMP_ARCH_MIPSEL64N32 ARCH_BAD
+#endif
+
+const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE;
+const uint32_t C_ARCH_X86 = SCMP_ARCH_X86;
+const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64;
+const uint32_t C_ARCH_X32 = SCMP_ARCH_X32;
+const uint32_t C_ARCH_ARM = SCMP_ARCH_ARM;
+const uint32_t C_ARCH_AARCH64 = SCMP_ARCH_AARCH64;
+const uint32_t C_ARCH_MIPS = SCMP_ARCH_MIPS;
+const uint32_t C_ARCH_MIPS64 = SCMP_ARCH_MIPS64;
+const uint32_t C_ARCH_MIPS64N32 = SCMP_ARCH_MIPS64N32;
+const uint32_t C_ARCH_MIPSEL = SCMP_ARCH_MIPSEL;
+const uint32_t C_ARCH_MIPSEL64 = SCMP_ARCH_MIPSEL64;
+const uint32_t C_ARCH_MIPSEL64N32 = SCMP_ARCH_MIPSEL64N32;
+
+const uint32_t C_ACT_KILL = SCMP_ACT_KILL;
+const uint32_t C_ACT_TRAP = SCMP_ACT_TRAP;
+const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
+const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
+const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW;
+
+// If TSync is not supported, make sure it doesn't map to a supported filter attribute
+// Don't worry about major version < 2, the minimum version checks should catch that case
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2
+#define SCMP_FLTATR_CTL_TSYNC _SCMP_CMP_MIN
+#endif
+
+const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
+const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
+const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;
+const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC;
+
+const int C_CMP_NE = (int)SCMP_CMP_NE;
+const int C_CMP_LT = (int)SCMP_CMP_LT;
+const int C_CMP_LE = (int)SCMP_CMP_LE;
+const int C_CMP_EQ = (int)SCMP_CMP_EQ;
+const int C_CMP_GE = (int)SCMP_CMP_GE;
+const int C_CMP_GT = (int)SCMP_CMP_GT;
+const int C_CMP_MASKED_EQ = (int)SCMP_CMP_MASKED_EQ;
+
+const int C_VERSION_MAJOR = SCMP_VER_MAJOR;
+const int C_VERSION_MINOR = SCMP_VER_MINOR;
+const int C_VERSION_MICRO = SCMP_VER_MICRO;
+
+typedef struct scmp_arg_cmp* scmp_cast_t;
+
+// Wrapper to create an scmp_arg_cmp struct
+void*
+make_struct_arg_cmp(
+ unsigned int arg,
+ int compare,
+ uint64_t a,
+ uint64_t b
+ )
+{
+ struct scmp_arg_cmp *s = malloc(sizeof(struct scmp_arg_cmp));
+
+ s->arg = arg;
+ s->op = compare;
+ s->datum_a = a;
+ s->datum_b = b;
+
+ return s;
+}
+*/
+import "C"
+
+// Nonexported types
+type scmpFilterAttr uint32
+
+// Nonexported constants
+
+const (
+ filterAttrActDefault scmpFilterAttr = iota
+ filterAttrActBadArch scmpFilterAttr = iota
+ filterAttrNNP scmpFilterAttr = iota
+ filterAttrTsync scmpFilterAttr = iota
+)
+
+const (
+ // An error return from certain libseccomp functions
+ scmpError C.int = -1
+ // Comparison boundaries to check for architecture validity
+ archStart ScmpArch = ArchNative
+ archEnd ScmpArch = ArchMIPSEL64N32
+ // Comparison boundaries to check for action validity
+ actionStart ScmpAction = ActKill
+ actionEnd ScmpAction = ActAllow
+ // Comparison boundaries to check for comparison operator validity
+ compareOpStart ScmpCompareOp = CompareNotEqual
+ compareOpEnd ScmpCompareOp = CompareMaskedEqual
+)
+
+var (
+ // Error thrown on bad filter context
+ errBadFilter = fmt.Errorf("filter is invalid or uninitialized")
+ // Constants representing library major, minor, and micro versions
+ verMajor = int(C.C_VERSION_MAJOR)
+ verMinor = int(C.C_VERSION_MINOR)
+ verMicro = int(C.C_VERSION_MICRO)
+)
+
+// Nonexported functions
+
+// Check if library version is greater than or equal to the given one
+func checkVersionAbove(major, minor, micro int) bool {
+ return (verMajor > major) ||
+ (verMajor == major && verMinor > minor) ||
+ (verMajor == major && verMinor == minor && verMicro >= micro)
+}
+
+// Init function: Verify library version is appropriate
+func init() {
+ if !checkVersionAbove(2, 1, 0) {
+ fmt.Fprintf(os.Stderr, "Libseccomp version too low: minimum supported is 2.1.0, detected %d.%d.%d\n", C.C_VERSION_MAJOR, C.C_VERSION_MINOR, C.C_VERSION_MICRO)
+ os.Exit(-1)
+ }
+}
+
+// Filter helpers
+
+// Filter finalizer - ensure that kernel context for filters is freed
+func filterFinalizer(f *ScmpFilter) {
+ f.Release()
+}
+
+// Get a raw filter attribute
+func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return 0x0, errBadFilter
+ }
+
+ if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
+ return 0x0, fmt.Errorf("the thread synchronization attribute is not supported in this version of the library")
+ }
+
+ var attribute C.uint32_t
+
+ retCode := C.seccomp_attr_get(f.filterCtx, attr.toNative(), &attribute)
+ if retCode != 0 {
+ return 0x0, syscall.Errno(-1 * retCode)
+ }
+
+ return attribute, nil
+}
+
+// Set a raw filter attribute
+func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if !checkVersionAbove(2, 2, 0) && attr == filterAttrTsync {
+ return fmt.Errorf("the thread synchronization attribute is not supported in this version of the library")
+ }
+
+ retCode := C.seccomp_attr_set(f.filterCtx, attr.toNative(), value)
+ if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// DOES NOT LOCK OR CHECK VALIDITY
+// Assumes caller has already done this
+// Wrapper for seccomp_rule_add_... functions
+func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact bool, cond C.scmp_cast_t) error {
+ var length C.uint
+ if cond != nil {
+ length = 1
+ } else {
+ length = 0
+ }
+
+ var retCode C.int
+ if exact {
+ retCode = C.seccomp_rule_add_exact_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
+ } else {
+ retCode = C.seccomp_rule_add_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
+ }
+
+ if syscall.Errno(-1*retCode) == syscall.EFAULT {
+ return fmt.Errorf("unrecognized syscall")
+ } else if syscall.Errno(-1*retCode) == syscall.EPERM {
+ return fmt.Errorf("requested action matches default action of filter")
+ } else if retCode != 0 {
+ return syscall.Errno(-1 * retCode)
+ }
+
+ return nil
+}
+
+// Generic add function for filter rules
+func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact bool, conds []ScmpCondition) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if !f.valid {
+ return errBadFilter
+ }
+
+ if len(conds) == 0 {
+ if err := f.addRuleWrapper(call, action, exact, nil); err != nil {
+ return err
+ }
+ } else {
+ // Conditional filtering is unsupported below library version v2.2.1
+ if !checkVersionAbove(2, 2, 1) {
+ return fmt.Errorf("conditional filtering requires libseccomp version >= 2.2.1")
+ }
+
+ for _, cond := range conds {
+ cmpStruct := C.make_struct_arg_cmp(C.uint(cond.Argument), cond.Op.toNative(), C.uint64_t(cond.Operand1), C.uint64_t(cond.Operand2))
+ defer C.free(cmpStruct)
+
+ if err := f.addRuleWrapper(call, action, exact, C.scmp_cast_t(cmpStruct)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// Generic Helpers
+
+// Helper - Sanitize Arch token input
+func sanitizeArch(in ScmpArch) error {
+ if in < archStart || in > archEnd {
+ return fmt.Errorf("unrecognized architecture")
+ }
+
+ if in.toNative() == C.C_ARCH_BAD {
+ return fmt.Errorf("architecture is not supported on this version of the library")
+ }
+
+ return nil
+}
+
+func sanitizeAction(in ScmpAction) error {
+ inTmp := in & 0x0000FFFF
+ if inTmp < actionStart || inTmp > actionEnd {
+ return fmt.Errorf("unrecognized action")
+ }
+
+ if inTmp != ActTrace && inTmp != ActErrno && (in&0xFFFF0000) != 0 {
+ return fmt.Errorf("highest 16 bits must be zeroed except for Trace and Errno")
+ }
+
+ return nil
+}
+
+func sanitizeCompareOp(in ScmpCompareOp) error {
+ if in < compareOpStart || in > compareOpEnd {
+ return fmt.Errorf("unrecognized comparison operator")
+ }
+
+ return nil
+}
+
+func archFromNative(a C.uint32_t) (ScmpArch, error) {
+ switch a {
+ case C.C_ARCH_X86:
+ return ArchX86, nil
+ case C.C_ARCH_X86_64:
+ return ArchAMD64, nil
+ case C.C_ARCH_X32:
+ return ArchX32, nil
+ case C.C_ARCH_ARM:
+ return ArchARM, nil
+ case C.C_ARCH_NATIVE:
+ return ArchNative, nil
+ case C.C_ARCH_AARCH64:
+ return ArchARM64, nil
+ case C.C_ARCH_MIPS:
+ return ArchMIPS, nil
+ case C.C_ARCH_MIPS64:
+ return ArchMIPS64, nil
+ case C.C_ARCH_MIPS64N32:
+ return ArchMIPS64N32, nil
+ case C.C_ARCH_MIPSEL:
+ return ArchMIPSEL, nil
+ case C.C_ARCH_MIPSEL64:
+ return ArchMIPSEL64, nil
+ case C.C_ARCH_MIPSEL64N32:
+ return ArchMIPSEL64N32, nil
+ default:
+ return 0x0, fmt.Errorf("unrecognized architecture")
+ }
+}
+
+// Only use with sanitized arches, no error handling
+func (a ScmpArch) toNative() C.uint32_t {
+ switch a {
+ case ArchX86:
+ return C.C_ARCH_X86
+ case ArchAMD64:
+ return C.C_ARCH_X86_64
+ case ArchX32:
+ return C.C_ARCH_X32
+ case ArchARM:
+ return C.C_ARCH_ARM
+ case ArchARM64:
+ return C.C_ARCH_AARCH64
+ case ArchMIPS:
+ return C.C_ARCH_MIPS
+ case ArchMIPS64:
+ return C.C_ARCH_MIPS64
+ case ArchMIPS64N32:
+ return C.C_ARCH_MIPS64N32
+ case ArchMIPSEL:
+ return C.C_ARCH_MIPSEL
+ case ArchMIPSEL64:
+ return C.C_ARCH_MIPSEL64
+ case ArchMIPSEL64N32:
+ return C.C_ARCH_MIPSEL64N32
+ case ArchNative:
+ return C.C_ARCH_NATIVE
+ default:
+ return 0x0
+ }
+}
+
+// Only use with sanitized ops, no error handling
+func (a ScmpCompareOp) toNative() C.int {
+ switch a {
+ case CompareNotEqual:
+ return C.C_CMP_NE
+ case CompareLess:
+ return C.C_CMP_LT
+ case CompareLessOrEqual:
+ return C.C_CMP_LE
+ case CompareEqual:
+ return C.C_CMP_EQ
+ case CompareGreaterEqual:
+ return C.C_CMP_GE
+ case CompareGreater:
+ return C.C_CMP_GT
+ case CompareMaskedEqual:
+ return C.C_CMP_MASKED_EQ
+ default:
+ return 0x0
+ }
+}
+
+func actionFromNative(a C.uint32_t) (ScmpAction, error) {
+ aTmp := a & 0xFFFF
+ switch a & 0xFFFF0000 {
+ case C.C_ACT_KILL:
+ return ActKill, nil
+ case C.C_ACT_TRAP:
+ return ActTrap, nil
+ case C.C_ACT_ERRNO:
+ return ActErrno.SetReturnCode(int16(aTmp)), nil
+ case C.C_ACT_TRACE:
+ return ActTrace.SetReturnCode(int16(aTmp)), nil
+ case C.C_ACT_ALLOW:
+ return ActAllow, nil
+ default:
+ return 0x0, fmt.Errorf("unrecognized action")
+ }
+}
+
+// Only use with sanitized actions, no error handling
+func (a ScmpAction) toNative() C.uint32_t {
+ switch a & 0xFFFF {
+ case ActKill:
+ return C.C_ACT_KILL
+ case ActTrap:
+ return C.C_ACT_TRAP
+ case ActErrno:
+ return C.C_ACT_ERRNO | (C.uint32_t(a) >> 16)
+ case ActTrace:
+ return C.C_ACT_TRACE | (C.uint32_t(a) >> 16)
+ case ActAllow:
+ return C.C_ACT_ALLOW
+ default:
+ return 0x0
+ }
+}
+
+// Internal only, assumes safe attribute
+func (a scmpFilterAttr) toNative() uint32 {
+ switch a {
+ case filterAttrActDefault:
+ return uint32(C.C_ATTRIBUTE_DEFAULT)
+ case filterAttrActBadArch:
+ return uint32(C.C_ATTRIBUTE_BADARCH)
+ case filterAttrNNP:
+ return uint32(C.C_ATTRIBUTE_NNP)
+ case filterAttrTsync:
+ return uint32(C.C_ATTRIBUTE_TSYNC)
+ default:
+ return 0x0
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md b/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md
new file mode 100644
index 0000000..2199945
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/LICENSE.md
@@ -0,0 +1,23 @@
+objx - by Mat Ryer and Tyler Bunnell
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Stretchr, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/README.md b/Godeps/_workspace/src/github.com/stretchr/objx/README.md
new file mode 100644
index 0000000..4aa1806
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/README.md
@@ -0,0 +1,3 @@
+# objx
+
+ * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx)
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go
new file mode 100644
index 0000000..721bcac
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go
@@ -0,0 +1,179 @@
+package objx
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// arrayAccesRegexString is the regex used to extract the array number
+// from the access path
+const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
+
+// arrayAccesRegex is the compiled arrayAccesRegexString
+var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
+
+// Get gets the value using the specified selector and
+// returns it inside a new Obj object.
+//
+// If it cannot find the value, Get will return a nil
+// value inside an instance of Obj.
+//
+// Get can only operate directly on map[string]interface{} and []interface{}.
+//
+// Example
+//
+// To access the title of the third chapter of the second book, do:
+//
+// o.Get("books[1].chapters[2].title")
+func (m Map) Get(selector string) *Value {
+ rawObj := access(m, selector, nil, false, false)
+ return &Value{data: rawObj}
+}
+
+// Set sets the value using the specified selector and
+// returns the object on which Set was called.
+//
+// Set can only operate directly on map[string]interface{} and []interface{}.
+//
+// Example
+//
+// To set the title of the third chapter of the second book, do:
+//
+// o.Set("books[1].chapters[2].title","Time to Go")
+func (m Map) Set(selector string, value interface{}) Map {
+ access(m, selector, value, true, false)
+ return m
+}
+
+// access accesses the object using the selector and performs the
+// appropriate action.
+func access(current, selector, value interface{}, isSet, panics bool) interface{} {
+
+ switch selector.(type) {
+ case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+
+ if array, ok := current.([]interface{}); ok {
+ index := intFromInterface(selector)
+
+ if index >= len(array) {
+ if panics {
+ panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
+ }
+ return nil
+ }
+
+ return array[index]
+ }
+
+ return nil
+
+ case string:
+
+ selStr := selector.(string)
+ selSegs := strings.SplitN(selStr, PathSeparator, 2)
+ thisSel := selSegs[0]
+ index := -1
+ var err error
+
+ // https://github.com/stretchr/objx/issues/12
+ if strings.Contains(thisSel, "[") {
+
+ arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel)
+
+ if len(arrayMatches) > 0 {
+
+ // Get the key into the map
+ thisSel = arrayMatches[1]
+
+ // Get the index into the array at the key
+ index, err = strconv.Atoi(arrayMatches[2])
+
+ if err != nil {
+ // This should never happen. If it does, something has gone
+ // seriously wrong. Panic.
+ panic("objx: Array index is not an integer. Must use array[int].")
+ }
+
+ }
+ }
+
+ if curMap, ok := current.(Map); ok {
+ current = map[string]interface{}(curMap)
+ }
+
+ // get the object in question
+ switch current.(type) {
+ case map[string]interface{}:
+ curMSI := current.(map[string]interface{})
+ if len(selSegs) <= 1 && isSet {
+ curMSI[thisSel] = value
+ return nil
+ } else {
+ current = curMSI[thisSel]
+ }
+ default:
+ current = nil
+ }
+
+ if current == nil && panics {
+ panic(fmt.Sprintf("objx: '%v' invalid on object.", selector))
+ }
+
+ // do we need to access the item of an array?
+ if index > -1 {
+ if array, ok := current.([]interface{}); ok {
+ if index < len(array) {
+ current = array[index]
+ } else {
+ if panics {
+ panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
+ }
+ current = nil
+ }
+ }
+ }
+
+ if len(selSegs) > 1 {
+ current = access(current, selSegs[1], value, isSet, panics)
+ }
+
+ }
+
+ return current
+
+}
+
+// intFromInterface converts an interface object holding any built-in
+// integer type to an int using a type switch and type assertions
+func intFromInterface(selector interface{}) int {
+ var value int
+ switch selector.(type) {
+ case int:
+ value = selector.(int)
+ case int8:
+ value = int(selector.(int8))
+ case int16:
+ value = int(selector.(int16))
+ case int32:
+ value = int(selector.(int32))
+ case int64:
+ value = int(selector.(int64))
+ case uint:
+ value = int(selector.(uint))
+ case uint8:
+ value = int(selector.(uint8))
+ case uint16:
+ value = int(selector.(uint16))
+ case uint32:
+ value = int(selector.(uint32))
+ case uint64:
+ value = int(selector.(uint64))
+ default:
+ panic("objx: array access argument is not an integer type (this should never happen)")
+ }
+
+ return value
+}
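An illustrative sketch of the selector syntax documented on Get and Set above (Str is one of the generated accessors defined elsewhere in this package):

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.Map{
		"books": []interface{}{
			map[string]interface{}{"title": "First"},
			map[string]interface{}{
				"chapters": []interface{}{
					map[string]interface{}{"title": "One"},
					map[string]interface{}{"title": "Two"},
					map[string]interface{}{"title": "Three"},
				},
			},
		},
	}

	fmt.Println(m.Get("books[1].chapters[2].title").Str()) // "Three"
	m.Set("books[1].chapters[2].title", "Time to Go")
}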
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt
new file mode 100644
index 0000000..3060234
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt
@@ -0,0 +1,14 @@
+ case []{1}:
+ a := object.([]{1})
+ if isSet {
+ a[index] = value.({1})
+ } else {
+ if index >= len(a) {
+ if panics {
+ panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a)))
+ }
+ return nil
+ } else {
+ return a[index]
+ }
+ }
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html
new file mode 100644
index 0000000..379ffc3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html
@@ -0,0 +1,86 @@
+<!-- Markup lost in extraction; only the page text survives. Recoverable
+     structure: a "Codegen" page with a Template textarea (use {x} as a
+     placeholder for each argument), an Arguments textarea (comma separated,
+     one block per line), and an Output area. -->
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt
new file mode 100644
index 0000000..b396900
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt
@@ -0,0 +1,286 @@
+/*
+ {4} ({1} and []{1})
+ --------------------------------------------------
+*/
+
+// {4} gets the value as a {1}, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) {4}(optionalDefault ...{1}) {1} {
+ if s, ok := v.data.({1}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return {3}
+}
+
+// Must{4} gets the value as a {1}.
+//
+// Panics if the object is not a {1}.
+func (v *Value) Must{4}() {1} {
+ return v.data.({1})
+}
+
+// {4}Slice gets the value as a []{1}, returns the optionalDefault
+// value or nil if the value is not a []{1}.
+func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} {
+ if s, ok := v.data.([]{1}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// Must{4}Slice gets the value as a []{1}.
+//
+// Panics if the object is not a []{1}.
+func (v *Value) Must{4}Slice() []{1} {
+ return v.data.([]{1})
+}
+
+// Is{4} gets whether the object contained is a {1} or not.
+func (v *Value) Is{4}() bool {
+ _, ok := v.data.({1})
+ return ok
+}
+
+// Is{4}Slice gets whether the object contained is a []{1} or not.
+func (v *Value) Is{4}Slice() bool {
+ _, ok := v.data.([]{1})
+ return ok
+}
+
+// Each{4} calls the specified callback for each object
+// in the []{1}.
+//
+// Panics if the object is the wrong type.
+func (v *Value) Each{4}(callback func(int, {1}) bool) *Value {
+
+ for index, val := range v.Must{4}Slice() {
+ carryon := callback(index, val)
+ if carryon == false {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// Where{4} uses the specified decider function to select items
+// from the []{1}. The object contained in the result will contain
+// only the selected items.
+func (v *Value) Where{4}(decider func(int, {1}) bool) *Value {
+
+ var selected []{1}
+
+ v.Each{4}(func(index int, val {1}) bool {
+ shouldSelect := decider(index, val)
+ if shouldSelect == false {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data:selected}
+
+}
+
+// Group{4} uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]{1}.
+func (v *Value) Group{4}(grouper func(int, {1}) string) *Value {
+
+ groups := make(map[string][]{1})
+
+ v.Each{4}(func(index int, val {1}) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]{1}, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data:groups}
+
+}
+
+// Replace{4} uses the specified function to replace each {1}s
+// by iterating each item. The data in the returned result will be a
+// []{1} containing the replaced items.
+func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value {
+
+ arr := v.Must{4}Slice()
+ replaced := make([]{1}, len(arr))
+
+ v.Each{4}(func(index int, val {1}) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data:replaced}
+
+}
+
+// Collect{4} uses the specified collector function to collect a value
+// for each of the {1}s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value {
+
+ arr := v.Must{4}Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.Each{4}(func(index int, val {1}) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data:collected}
+}
+
+// ************************************************************
+// TESTS
+// ************************************************************
+
+func Test{4}(t *testing.T) {
+
+ val := {1}( {2} )
+ m := map[string]interface{}{"value": val, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").{4}())
+ assert.Equal(t, val, New(m).Get("value").Must{4}())
+ assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}())
+ assert.Equal(t, val, New(m).Get("nothing").{4}({2}))
+
+ assert.Panics(t, func() {
+ New(m).Get("age").Must{4}()
+ })
+
+}
+
+func Test{4}Slice(t *testing.T) {
+
+ val := {1}( {2} )
+ m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil}
+ assert.Equal(t, val, New(m).Get("value").{4}Slice()[0])
+ assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0])
+ assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice())
+ assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0])
+
+ assert.Panics(t, func() {
+ New(m).Get("nothing").Must{4}Slice()
+ })
+
+}
+
+func TestIs{4}(t *testing.T) {
+
+ var v *Value
+
+ v = &Value{data: {1}({2})}
+ assert.True(t, v.Is{4}())
+
+ v = &Value{data: []{1}{ {1}({2}) }}
+ assert.True(t, v.Is{4}Slice())
+
+}
+
+func TestEach{4}(t *testing.T) {
+
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+ count := 0
+ replacedVals := make([]{1}, 0)
+ assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool {
+
+ count++
+ replacedVals = append(replacedVals, val)
+
+ // abort early
+ if i == 2 {
+ return false
+ }
+
+ return true
+
+ }))
+
+ assert.Equal(t, count, 3)
+ assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0])
+ assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1])
+ assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2])
+
+}
+
+func TestWhere{4}(t *testing.T) {
+
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ selected := v.Where{4}(func(i int, val {1}) bool {
+ return i%2==0
+ }).Must{4}Slice()
+
+ assert.Equal(t, 3, len(selected))
+
+}
+
+func TestGroup{4}(t *testing.T) {
+
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ grouped := v.Group{4}(func(i int, val {1}) string {
+ return fmt.Sprintf("%v", i%2==0)
+ }).data.(map[string][]{1})
+
+ assert.Equal(t, 2, len(grouped))
+ assert.Equal(t, 3, len(grouped["true"]))
+ assert.Equal(t, 3, len(grouped["false"]))
+
+}
+
+func TestReplace{4}(t *testing.T) {
+
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ rawArr := v.Must{4}Slice()
+
+ replaced := v.Replace{4}(func(index int, val {1}) {1} {
+ if index < len(rawArr)-1 {
+ return rawArr[index+1]
+ }
+ return rawArr[0]
+ })
+
+ replacedArr := replaced.Must{4}Slice()
+ if assert.Equal(t, 6, len(replacedArr)) {
+ assert.Equal(t, replacedArr[0], rawArr[1])
+ assert.Equal(t, replacedArr[1], rawArr[2])
+ assert.Equal(t, replacedArr[2], rawArr[3])
+ assert.Equal(t, replacedArr[3], rawArr[4])
+ assert.Equal(t, replacedArr[4], rawArr[5])
+ assert.Equal(t, replacedArr[5], rawArr[0])
+ }
+
+}
+
+func TestCollect{4}(t *testing.T) {
+
+ v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
+
+ collected := v.Collect{4}(func(index int, val {1}) interface{} {
+ return index
+ })
+
+ collectedArr := collected.MustInterSlice()
+ if assert.Equal(t, 6, len(collectedArr)) {
+ assert.Equal(t, collectedArr[0], 0)
+ assert.Equal(t, collectedArr[1], 1)
+ assert.Equal(t, collectedArr[2], 2)
+ assert.Equal(t, collectedArr[3], 3)
+ assert.Equal(t, collectedArr[4], 4)
+ assert.Equal(t, collectedArr[5], 5)
+ }
+
+}
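
Editor's note: this template file is consumed by the objx code generator. The `{N}` tokens are positional placeholders filled in from one line of codegen/types_list.txt (the next file in this diff), whose columns supply a friendly name ({0}), the concrete type ({1}), a sample value ({2}), the zero/default value ({3}), and the method-name suffix ({4}). The generator itself is not part of this diff, so the following is only a minimal sketch of the substitution step under that assumption (the `expand` helper is hypothetical):

    package main

    import (
        "fmt"
        "strings"
    )

    // expand fills the positional {N} placeholders in a codegen template
    // with the fields parsed from one types_list.txt line.
    func expand(template string, fields []string) string {
        pairs := make([]string, 0, len(fields)*2)
        for i, f := range fields {
            pairs = append(pairs, fmt.Sprintf("{%d}", i), f)
        }
        return strings.NewReplacer(pairs...).Replace(template)
    }

    func main() {
        // The "Bool,bool,true,false,Bool" line from types_list.txt.
        fields := []string{"Bool", "bool", "true", "false", "Bool"}
        fmt.Println(expand("func (v *Value) Must{4}() {1}", fields))
        // Output: func (v *Value) MustBool() bool
    }
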
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt
new file mode 100644
index 0000000..069d43d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt
@@ -0,0 +1,20 @@
+Interface,interface{},"something",nil,Inter
+Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI
+ObjxMap,(Map),New(1),New(nil),ObjxMap
+Bool,bool,true,false,Bool
+String,string,"hello","",Str
+Int,int,1,0,Int
+Int8,int8,1,0,Int8
+Int16,int16,1,0,Int16
+Int32,int32,1,0,Int32
+Int64,int64,1,0,Int64
+Uint,uint,1,0,Uint
+Uint8,uint8,1,0,Uint8
+Uint16,uint16,1,0,Uint16
+Uint32,uint32,1,0,Uint32
+Uint64,uint64,1,0,Uint64
+Uintptr,uintptr,1,0,Uintptr
+Float32,float32,1,0,Float32
+Float64,float64,1,0,Float64
+Complex64,complex64,1,0,Complex64
+Complex128,complex128,1,0,Complex128
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/constants.go b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go
new file mode 100644
index 0000000..f9eb42a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go
@@ -0,0 +1,13 @@
+package objx
+
+const (
+ // PathSeparator is the character used to separate the elements
+ // of the keypath.
+ //
+ // For example, `location.address.city`
+ PathSeparator string = "."
+
+ // SignatureSeparator is the character that is used to
+ // separate the Base64 string from the security signature.
+ SignatureSeparator = "_"
+)
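
Editor's note: these two separators drive the keypath syntax used by `Get` and the signed-string format produced in conversions.go below. A small usage sketch, assuming the package is imported at its canonical path (the JSON and key values are illustrative):

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.MustFromJSON(`{"location":{"address":{"city":"Boulder"}}}`)

        // PathSeparator (".") splits the selector into successive map lookups.
        fmt.Println(m.Get("location.address.city").Str()) // Boulder

        // SignatureSeparator ("_") joins the Base64 payload to its signature.
        fmt.Println(m.MustSignedBase64("secret")) // <base64>_<sha1-hex>
    }
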
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go
new file mode 100644
index 0000000..9cdfa9f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go
@@ -0,0 +1,117 @@
+package objx
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+)
+
+// JSON converts the contained object to a JSON string
+// representation
+func (m Map) JSON() (string, error) {
+
+ result, err := json.Marshal(m)
+
+ if err != nil {
+ err = errors.New("objx: JSON encode failed with: " + err.Error())
+ }
+
+ return string(result), err
+
+}
+
+// MustJSON converts the contained object to a JSON string
+// representation and panics if there is an error
+func (m Map) MustJSON() string {
+ result, err := m.JSON()
+ if err != nil {
+ panic(err.Error())
+ }
+ return result
+}
+
+// Base64 converts the contained object to a Base64 string
+// representation of the JSON string representation
+func (m Map) Base64() (string, error) {
+
+ var buf bytes.Buffer
+
+ jsonData, err := m.JSON()
+ if err != nil {
+ return "", err
+ }
+
+ encoder := base64.NewEncoder(base64.StdEncoding, &buf)
+ encoder.Write([]byte(jsonData))
+ encoder.Close()
+
+ return buf.String(), nil
+
+}
+
+// MustBase64 converts the contained object to a Base64 string
+// representation of the JSON string representation and panics
+// if there is an error
+func (m Map) MustBase64() string {
+ result, err := m.Base64()
+ if err != nil {
+ panic(err.Error())
+ }
+ return result
+}
+
+// SignedBase64 converts the contained object to a Base64 string
+// representation of the JSON string representation and signs it
+// using the provided key.
+func (m Map) SignedBase64(key string) (string, error) {
+
+ base64, err := m.Base64()
+ if err != nil {
+ return "", err
+ }
+
+ sig := HashWithKey(base64, key)
+
+ return base64 + SignatureSeparator + sig, nil
+
+}
+
+// MustSignedBase64 converts the contained object to a Base64 string
+// representation of the JSON string representation and signs it
+// using the provided key and panics if there is an error
+func (m Map) MustSignedBase64(key string) string {
+ result, err := m.SignedBase64(key)
+ if err != nil {
+ panic(err.Error())
+ }
+ return result
+}
+
+/*
+ URL Query
+ ------------------------------------------------
+*/
+
+// URLValues creates a url.Values object from the Map. This
+// function requires that the wrapped object be a map[string]interface{}
+func (m Map) URLValues() url.Values {
+
+ vals := make(url.Values)
+
+ for k, v := range m {
+ //TODO: can this be done without sprintf?
+ vals.Set(k, fmt.Sprintf("%v", v))
+ }
+
+ return vals
+}
+
+// URLQuery gets an encoded URL query representing the given
+// Map. This function requires that the wrapped object be a
+// map[string]interface{}
+func (m Map) URLQuery() (string, error) {
+ return m.URLValues().Encode(), nil
+}
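
Editor's note: a round-trip sketch of the signing helpers above, paired with FromSignedBase64 from map.go later in this diff (the key is illustrative). Signing appends sha1(payload + ":" + key) after the "_" separator; verification recomputes and compares it:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.MSI("name", "Mat", "age", 30)

        // JSON -> Base64 -> "<payload>_<signature>".
        signed := m.MustSignedBase64("secret")

        // Verifies the signature before decoding; a tampered payload
        // or a different key yields an error instead of a Map.
        decoded, err := objx.FromSignedBase64(signed, "secret")
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded.Get("name").Str()) // Mat
    }
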
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/doc.go b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go
new file mode 100644
index 0000000..47bf85e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go
@@ -0,0 +1,72 @@
+// objx - Go package for dealing with maps, slices, JSON and other data.
+//
+// Overview
+//
+// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
+// a powerful `Get` method (among others) that allows you to easily and quickly get
+// access to data within the map, without having to worry too much about type assertions,
+// missing data, default values etc.
+//
+// Pattern
+//
+// Objx uses a predictable pattern to make accessing data from within
+// `map[string]interface{}`s easy.
+//
+// Call one of the `objx.` functions to create your `objx.Map` to get going:
+//
+// m, err := objx.FromJSON(json)
+//
+// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong,
+// the rest will be optimistic and try to figure things out without panicking.
+//
+// Use `Get` to access the value you're interested in. You can use dot and array
+// notation too:
+//
+// m.Get("places[0].latlng")
+//
+// Once you have sought the `Value` you're interested in, you can use the `Is*` methods
+// to determine its type.
+//
+// if m.Get("code").IsStr() { /* ... */ }
+//
+// Or you can just assume the type, and use one of the strong type methods to
+// extract the real value:
+//
+// m.Get("code").Int()
+//
+// If there's no value there (or if it's the wrong type) then a default value
+// will be returned, or you can be explicit about the default value.
+//
+//	m.Get("code").Int(-1)
+//
+// If you're dealing with a slice of data as a value, Objx provides many useful
+// methods for iterating, manipulating and selecting that data. You can find out more
+// by exploring the index below.
+//
+// Reading data
+//
+// A simple example of how to use Objx:
+//
+// // use MustFromJSON to make an objx.Map from some JSON
+// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
+//
+// // get the details
+// name := m.Get("name").Str()
+// age := m.Get("age").Int()
+//
+// // get their nickname (or use their name if they
+// // don't have one)
+// nickname := m.Get("nickname").Str(name)
+//
+// Ranging
+//
+// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For
+// example, to `range` the data, do what you would expect:
+//
+// m := objx.MustFromJSON(json)
+// for key, value := range m {
+//
+// /* ... do your magic ... */
+//
+// }
+package objx
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map.go b/Godeps/_workspace/src/github.com/stretchr/objx/map.go
new file mode 100644
index 0000000..eb6ed8e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/map.go
@@ -0,0 +1,222 @@
+package objx
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "net/url"
+ "strings"
+)
+
+// MSIConvertable is an interface that defines methods for converting your
+// custom types to a map[string]interface{} representation.
+type MSIConvertable interface {
+ // MSI gets a map[string]interface{} (msi) representing the
+ // object.
+ MSI() map[string]interface{}
+}
+
+// Map provides extended functionality for working with
+// untyped data, in particular map[string]interface{} (msi).
+type Map map[string]interface{}
+
+// Value returns the internal value instance
+func (m Map) Value() *Value {
+ return &Value{data: m}
+}
+
+// Nil represents a nil Map.
+var Nil Map = New(nil)
+
+// New creates a new Map containing the map[string]interface{} in the data argument.
+// If the data argument is not a map[string]interface{}, New attempts to call the
+// MSI() method on the MSIConvertable interface to create one.
+func New(data interface{}) Map {
+ if _, ok := data.(map[string]interface{}); !ok {
+ if converter, ok := data.(MSIConvertable); ok {
+ data = converter.MSI()
+ } else {
+ return nil
+ }
+ }
+ return Map(data.(map[string]interface{}))
+}
+
+// MSI creates a map[string]interface{} and puts it inside a new Map.
+//
+// The arguments follow a key, value pattern.
+//
+// Panics
+//
+// Panics if any key argument is non-string or if there are an odd number of arguments.
+//
+// Example
+//
+// To easily create Maps:
+//
+// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
+//
+//	// creates a Map equivalent to
+// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}})
+func MSI(keyAndValuePairs ...interface{}) Map {
+
+ newMap := make(map[string]interface{})
+ keyAndValuePairsLen := len(keyAndValuePairs)
+
+ if keyAndValuePairsLen%2 != 0 {
+ panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.")
+ }
+
+ for i := 0; i < keyAndValuePairsLen; i = i + 2 {
+
+ key := keyAndValuePairs[i]
+ value := keyAndValuePairs[i+1]
+
+ // make sure the key is a string
+ keyString, keyStringOK := key.(string)
+ if !keyStringOK {
+			panic("objx: MSI must follow 'string, interface{}' pattern. Key argument is not a string.")
+ }
+
+ newMap[keyString] = value
+
+ }
+
+ return New(newMap)
+}
+
+// ****** Conversion Constructors
+
+// MustFromJSON creates a new Map containing the data specified in the
+// jsonString.
+//
+// Panics if the JSON is invalid.
+func MustFromJSON(jsonString string) Map {
+ o, err := FromJSON(jsonString)
+
+ if err != nil {
+ panic("objx: MustFromJSON failed with error: " + err.Error())
+ }
+
+ return o
+}
+
+// FromJSON creates a new Map containing the data specified in the
+// jsonString.
+//
+// Returns an error if the JSON is invalid.
+func FromJSON(jsonString string) (Map, error) {
+
+ var data interface{}
+ err := json.Unmarshal([]byte(jsonString), &data)
+
+ if err != nil {
+ return Nil, err
+ }
+
+ return New(data), nil
+
+}
+
+// FromBase64 creates a new Map containing the data specified
+// in the Base64 string.
+//
+// The string is an encoded JSON string returned by Base64
+func FromBase64(base64String string) (Map, error) {
+
+ decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String))
+
+ decoded, err := ioutil.ReadAll(decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ return FromJSON(string(decoded))
+}
+
+// MustFromBase64 creates a new Map containing the data specified
+// in the Base64 string and panics if there is an error.
+//
+// The string is an encoded JSON string returned by Base64
+func MustFromBase64(base64String string) Map {
+
+ result, err := FromBase64(base64String)
+
+ if err != nil {
+ panic("objx: MustFromBase64 failed with error: " + err.Error())
+ }
+
+ return result
+}
+
+// FromSignedBase64 creates a new Map containing the data specified
+// in the Base64 string.
+//
+// The string is an encoded JSON string returned by SignedBase64
+func FromSignedBase64(base64String, key string) (Map, error) {
+ parts := strings.Split(base64String, SignatureSeparator)
+ if len(parts) != 2 {
+ return nil, errors.New("objx: Signed base64 string is malformed.")
+ }
+
+ sig := HashWithKey(parts[0], key)
+ if parts[1] != sig {
+ return nil, errors.New("objx: Signature for base64 data does not match.")
+ }
+
+ return FromBase64(parts[0])
+}
+
+// MustFromSignedBase64 creates a new Map containing the data specified
+// in the Base64 string and panics if there is an error.
+//
+// The string is an encoded JSON string returned by Base64
+func MustFromSignedBase64(base64String, key string) Map {
+
+ result, err := FromSignedBase64(base64String, key)
+
+ if err != nil {
+ panic("objx: MustFromSignedBase64 failed with error: " + err.Error())
+ }
+
+ return result
+}
+
+// FromURLQuery generates a new Map by parsing the specified
+// query.
+//
+// For queries with multiple values, the first value is selected.
+func FromURLQuery(query string) (Map, error) {
+
+ vals, err := url.ParseQuery(query)
+
+ if err != nil {
+ return nil, err
+ }
+
+ m := make(map[string]interface{})
+ for k, vals := range vals {
+ m[k] = vals[0]
+ }
+
+ return New(m), nil
+}
+
+// MustFromURLQuery generates a new Map by parsing the specified
+// query.
+//
+// For queries with multiple values, the first value is selected.
+//
+// Panics if it encounters an error
+func MustFromURLQuery(query string) Map {
+
+ o, err := FromURLQuery(query)
+
+ if err != nil {
+ panic("objx: MustFromURLQuery failed with error: " + err.Error())
+ }
+
+ return o
+
+}
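
Editor's note: a short sketch of the query-string constructor above, showing the documented first-value-wins rule and that parsed values stay strings (the query is illustrative):

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m, err := objx.FromURLQuery("name=Mat&name=Tyler&age=30")
        if err != nil {
            panic(err)
        }
        fmt.Println(m.Get("name").Str()) // Mat (first value wins)
        fmt.Println(m.Get("age").Str())  // "30": query values remain strings
    }
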
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go
new file mode 100644
index 0000000..b35c863
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go
@@ -0,0 +1,81 @@
+package objx
+
+// Exclude returns a new Map with the keys in the specified []string
+// excluded.
+func (d Map) Exclude(exclude []string) Map {
+
+ excluded := make(Map)
+ for k, v := range d {
+		shouldInclude := true
+ for _, toExclude := range exclude {
+ if k == toExclude {
+ shouldInclude = false
+ break
+ }
+ }
+ if shouldInclude {
+ excluded[k] = v
+ }
+ }
+
+ return excluded
+}
+
+// Copy creates a shallow copy of the Map.
+func (m Map) Copy() Map {
+ copied := make(map[string]interface{})
+ for k, v := range m {
+ copied[k] = v
+ }
+ return New(copied)
+}
+
+// Merge blends the specified map with a copy of this map and returns the result.
+//
+// Keys that appear in both will be selected from the specified map.
+// This method requires that the wrapped object be a map[string]interface{}
+func (m Map) Merge(merge Map) Map {
+ return m.Copy().MergeHere(merge)
+}
+
+// MergeHere blends the specified map with this map and returns the current map.
+//
+// Keys that appear in both will be selected from the specified map. The original map
+// will be modified. This method requires that
+// the wrapped object be a map[string]interface{}
+func (m Map) MergeHere(merge Map) Map {
+
+ for k, v := range merge {
+ m[k] = v
+ }
+
+ return m
+
+}
+
+// Transform builds a new Map giving the transformer a chance
+// to change the keys and values as it goes. This method requires that
+// the wrapped object be a map[string]interface{}
+func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map {
+ newMap := make(map[string]interface{})
+ for k, v := range m {
+ modifiedKey, modifiedVal := transformer(k, v)
+ newMap[modifiedKey] = modifiedVal
+ }
+ return New(newMap)
+}
+
+// TransformKeys builds a new map using the specified key mapping.
+//
+// Unspecified keys will be unaltered.
+// This method requires that the wrapped object be a map[string]interface{}
+func (m Map) TransformKeys(mapping map[string]string) Map {
+ return m.Transform(func(key string, value interface{}) (string, interface{}) {
+
+ if newKey, ok := mapping[key]; ok {
+ return newKey, value
+ }
+
+ return key, value
+ })
+}
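
Editor's note: putting the mutation helpers together; note that Merge works on a copy while MergeHere mutates the receiver (values are illustrative):

    package main

    import (
        "fmt"
        "strings"

        "github.com/stretchr/objx"
    )

    func main() {
        base := objx.MSI("name", "Mat", "age", 30, "admin", true)

        // Merge works on a copy; the receiver is left untouched.
        merged := base.Merge(objx.MSI("age", 31))
        fmt.Println(merged.Get("age").Int(), base.Get("age").Int()) // 31 30

        // Exclude drops the listed keys; Transform rewrites keys/values.
        public := base.Exclude([]string{"admin"}).Transform(
            func(key string, value interface{}) (string, interface{}) {
                return strings.ToUpper(key), value
            })
        fmt.Println(public.Has("ADMIN"), public.Has("NAME")) // false true
    }
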
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security.go b/Godeps/_workspace/src/github.com/stretchr/objx/security.go
new file mode 100644
index 0000000..fdd6be9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/security.go
@@ -0,0 +1,14 @@
+package objx
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+)
+
+// HashWithKey hashes the specified string using the security
+// key.
+func HashWithKey(data, key string) string {
+ hash := sha1.New()
+ hash.Write([]byte(data + ":" + key))
+ return hex.EncodeToString(hash.Sum(nil))
+}
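
Editor's note: HashWithKey is a plain SHA-1 over the concatenation data + ":" + key, not an HMAC. The equivalent computation using only the standard library:

    package main

    import (
        "crypto/sha1"
        "encoding/hex"
        "fmt"
    )

    func main() {
        // Same digest that objx.HashWithKey("payload", "secret") returns.
        sum := sha1.Sum([]byte("payload" + ":" + "secret"))
        fmt.Println(hex.EncodeToString(sum[:]))
    }
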
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go
new file mode 100644
index 0000000..d9e0b47
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go
@@ -0,0 +1,17 @@
+package objx
+
+// Has gets whether there is something at the specified selector
+// or not.
+//
+// If m is nil, Has will always return false.
+func (m Map) Has(selector string) bool {
+ if m == nil {
+ return false
+ }
+ return !m.Get(selector).IsNil()
+}
+
+// IsNil gets whether the data is nil or not.
+func (v *Value) IsNil() bool {
+ return v == nil || v.data == nil
+}
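
Editor's note: because IsNil treats a missing key and an explicit nil the same way, Has answers false for both, and a nil Map is safe to query. A brief sketch (values illustrative):

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        m := objx.MustFromJSON(`{"name":"Mat","nick":null}`)
        fmt.Println(m.Has("name")) // true
        fmt.Println(m.Has("nick")) // false: present but nil

        var n objx.Map             // a nil Map never panics in Has
        fmt.Println(n.Has("name")) // false
    }
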
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go
new file mode 100644
index 0000000..f3ecb29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go
@@ -0,0 +1,2881 @@
+package objx
+
+/*
+ Inter (interface{} and []interface{})
+ --------------------------------------------------
+*/
+
+// Inter gets the value as an interface{}, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Inter(optionalDefault ...interface{}) interface{} {
+ if s, ok := v.data.(interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInter gets the value as an interface{}.
+//
+// Panics if the object is not an interface{}.
+func (v *Value) MustInter() interface{} {
+ return v.data.(interface{})
+}
+
+// InterSlice gets the value as a []interface{}, returns the optionalDefault
+// value or nil if the value is not a []interface{}.
+func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} {
+ if s, ok := v.data.([]interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInterSlice gets the value as a []interface{}.
+//
+// Panics if the object is not a []interface{}.
+func (v *Value) MustInterSlice() []interface{} {
+ return v.data.([]interface{})
+}
+
+// IsInter gets whether the object contained is an interface{} or not.
+func (v *Value) IsInter() bool {
+ _, ok := v.data.(interface{})
+ return ok
+}
+
+// IsInterSlice gets whether the object contained is a []interface{} or not.
+func (v *Value) IsInterSlice() bool {
+ _, ok := v.data.([]interface{})
+ return ok
+}
+
+// EachInter calls the specified callback for each object
+// in the []interface{}.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInter(callback func(int, interface{}) bool) *Value {
+
+ for index, val := range v.MustInterSlice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereInter uses the specified decider function to select items
+// from the []interface{}. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value {
+
+ var selected []interface{}
+
+ v.EachInter(func(index int, val interface{}) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupInter uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]interface{}.
+func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value {
+
+ groups := make(map[string][]interface{})
+
+ v.EachInter(func(index int, val interface{}) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]interface{}, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceInter uses the specified function to replace each interface{}
+// by iterating over each item. The data in the returned result will be a
+// []interface{} containing the replaced items.
+func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value {
+
+ arr := v.MustInterSlice()
+ replaced := make([]interface{}, len(arr))
+
+ v.EachInter(func(index int, val interface{}) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectInter uses the specified collector function to collect a value
+// for each of the interface{}s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value {
+
+ arr := v.MustInterSlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachInter(func(index int, val interface{}) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
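
Editor's note: every generated family in this file repeats the same Each/Where/Group/Replace/Collect shape, so one sketch for the interface{} family stands in for all of them. This assumes the selection fix applied above, where a decider returning true keeps the item:

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        v := objx.MSI("nums", []interface{}{1, 2, 3, 4}).Get("nums")

        // Each* iterates; returning false stops the loop early.
        v.EachInter(func(i int, val interface{}) bool {
            fmt.Println(i, val)
            return i < 2 // stop after the third element
        })

        // Where* filters; Must*Slice unwraps the typed result.
        evens := v.WhereInter(func(i int, val interface{}) bool {
            return val.(int)%2 == 0
        }).MustInterSlice()
        fmt.Println(evens) // [2 4]
    }
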
+/*
+ MSI (map[string]interface{} and []map[string]interface{})
+ --------------------------------------------------
+*/
+
+// MSI gets the value as a map[string]interface{}, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} {
+ if s, ok := v.data.(map[string]interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustMSI gets the value as a map[string]interface{}.
+//
+// Panics if the object is not a map[string]interface{}.
+func (v *Value) MustMSI() map[string]interface{} {
+ return v.data.(map[string]interface{})
+}
+
+// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault
+// value or nil if the value is not a []map[string]interface{}.
+func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} {
+ if s, ok := v.data.([]map[string]interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustMSISlice gets the value as a []map[string]interface{}.
+//
+// Panics if the object is not a []map[string]interface{}.
+func (v *Value) MustMSISlice() []map[string]interface{} {
+ return v.data.([]map[string]interface{})
+}
+
+// IsMSI gets whether the object contained is a map[string]interface{} or not.
+func (v *Value) IsMSI() bool {
+ _, ok := v.data.(map[string]interface{})
+ return ok
+}
+
+// IsMSISlice gets whether the object contained is a []map[string]interface{} or not.
+func (v *Value) IsMSISlice() bool {
+ _, ok := v.data.([]map[string]interface{})
+ return ok
+}
+
+// EachMSI calls the specified callback for each object
+// in the []map[string]interface{}.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value {
+
+ for index, val := range v.MustMSISlice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereMSI uses the specified decider function to select items
+// from the []map[string]interface{}. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value {
+
+ var selected []map[string]interface{}
+
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupMSI uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]map[string]interface{}.
+func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value {
+
+ groups := make(map[string][]map[string]interface{})
+
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]map[string]interface{}, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceMSI uses the specified function to replace each map[string]interface{}
+// by iterating over each item. The data in the returned result will be a
+// []map[string]interface{} containing the replaced items.
+func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value {
+
+ arr := v.MustMSISlice()
+ replaced := make([]map[string]interface{}, len(arr))
+
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectMSI uses the specified collector function to collect a value
+// for each of the map[string]interface{}s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value {
+
+ arr := v.MustMSISlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ ObjxMap ((Map) and [](Map))
+ --------------------------------------------------
+*/
+
+// ObjxMap gets the value as a (Map), returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) ObjxMap(optionalDefault ...(Map)) Map {
+ if s, ok := v.data.((Map)); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return New(nil)
+}
+
+// MustObjxMap gets the value as a (Map).
+//
+// Panics if the object is not a (Map).
+func (v *Value) MustObjxMap() Map {
+ return v.data.((Map))
+}
+
+// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault
+// value or nil if the value is not a [](Map).
+func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) {
+ if s, ok := v.data.([](Map)); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustObjxMapSlice gets the value as a [](Map).
+//
+// Panics if the object is not a [](Map).
+func (v *Value) MustObjxMapSlice() [](Map) {
+ return v.data.([](Map))
+}
+
+// IsObjxMap gets whether the object contained is a (Map) or not.
+func (v *Value) IsObjxMap() bool {
+ _, ok := v.data.((Map))
+ return ok
+}
+
+// IsObjxMapSlice gets whether the object contained is a [](Map) or not.
+func (v *Value) IsObjxMapSlice() bool {
+ _, ok := v.data.([](Map))
+ return ok
+}
+
+// EachObjxMap calls the specified callback for each object
+// in the [](Map).
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value {
+
+ for index, val := range v.MustObjxMapSlice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereObjxMap uses the specified decider function to select items
+// from the [](Map). The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value {
+
+ var selected [](Map)
+
+ v.EachObjxMap(func(index int, val Map) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupObjxMap uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][](Map).
+func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
+
+ groups := make(map[string][](Map))
+
+ v.EachObjxMap(func(index int, val Map) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([](Map), 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceObjxMap uses the specified function to replace each (Map)
+// by iterating over each item. The data in the returned result will be a
+// [](Map) containing the replaced items.
+func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value {
+
+ arr := v.MustObjxMapSlice()
+ replaced := make([](Map), len(arr))
+
+ v.EachObjxMap(func(index int, val Map) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectObjxMap uses the specified collector function to collect a value
+// for each of the (Map)s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value {
+
+ arr := v.MustObjxMapSlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachObjxMap(func(index int, val Map) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Bool (bool and []bool)
+ --------------------------------------------------
+*/
+
+// Bool gets the value as a bool, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Bool(optionalDefault ...bool) bool {
+ if s, ok := v.data.(bool); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return false
+}
+
+// MustBool gets the value as a bool.
+//
+// Panics if the object is not a bool.
+func (v *Value) MustBool() bool {
+ return v.data.(bool)
+}
+
+// BoolSlice gets the value as a []bool, returns the optionalDefault
+// value or nil if the value is not a []bool.
+func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool {
+ if s, ok := v.data.([]bool); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustBoolSlice gets the value as a []bool.
+//
+// Panics if the object is not a []bool.
+func (v *Value) MustBoolSlice() []bool {
+ return v.data.([]bool)
+}
+
+// IsBool gets whether the object contained is a bool or not.
+func (v *Value) IsBool() bool {
+ _, ok := v.data.(bool)
+ return ok
+}
+
+// IsBoolSlice gets whether the object contained is a []bool or not.
+func (v *Value) IsBoolSlice() bool {
+ _, ok := v.data.([]bool)
+ return ok
+}
+
+// EachBool calls the specified callback for each object
+// in the []bool.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachBool(callback func(int, bool) bool) *Value {
+
+ for index, val := range v.MustBoolSlice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereBool uses the specified decider function to select items
+// from the []bool. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereBool(decider func(int, bool) bool) *Value {
+
+ var selected []bool
+
+ v.EachBool(func(index int, val bool) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupBool uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]bool.
+func (v *Value) GroupBool(grouper func(int, bool) string) *Value {
+
+ groups := make(map[string][]bool)
+
+ v.EachBool(func(index int, val bool) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]bool, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceBool uses the specified function to replace each bool
+// by iterating over each item. The data in the returned result will be a
+// []bool containing the replaced items.
+func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value {
+
+ arr := v.MustBoolSlice()
+ replaced := make([]bool, len(arr))
+
+ v.EachBool(func(index int, val bool) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectBool uses the specified collector function to collect a value
+// for each of the bools in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value {
+
+ arr := v.MustBoolSlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachBool(func(index int, val bool) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Str (string and []string)
+ --------------------------------------------------
+*/
+
+// Str gets the value as a string, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Str(optionalDefault ...string) string {
+ if s, ok := v.data.(string); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return ""
+}
+
+// MustStr gets the value as a string.
+//
+// Panics if the object is not a string.
+func (v *Value) MustStr() string {
+ return v.data.(string)
+}
+
+// StrSlice gets the value as a []string, returns the optionalDefault
+// value or nil if the value is not a []string.
+func (v *Value) StrSlice(optionalDefault ...[]string) []string {
+ if s, ok := v.data.([]string); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustStrSlice gets the value as a []string.
+//
+// Panics if the object is not a []string.
+func (v *Value) MustStrSlice() []string {
+ return v.data.([]string)
+}
+
+// IsStr gets whether the object contained is a string or not.
+func (v *Value) IsStr() bool {
+ _, ok := v.data.(string)
+ return ok
+}
+
+// IsStrSlice gets whether the object contained is a []string or not.
+func (v *Value) IsStrSlice() bool {
+ _, ok := v.data.([]string)
+ return ok
+}
+
+// EachStr calls the specified callback for each object
+// in the []string.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachStr(callback func(int, string) bool) *Value {
+
+ for index, val := range v.MustStrSlice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereStr uses the specified decider function to select items
+// from the []string. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereStr(decider func(int, string) bool) *Value {
+
+ var selected []string
+
+ v.EachStr(func(index int, val string) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupStr uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]string.
+func (v *Value) GroupStr(grouper func(int, string) string) *Value {
+
+ groups := make(map[string][]string)
+
+ v.EachStr(func(index int, val string) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]string, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceStr uses the specified function to replace each string
+// by iterating over each item. The data in the returned result will be a
+// []string containing the replaced items.
+func (v *Value) ReplaceStr(replacer func(int, string) string) *Value {
+
+ arr := v.MustStrSlice()
+ replaced := make([]string, len(arr))
+
+ v.EachStr(func(index int, val string) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectStr uses the specified collector function to collect a value
+// for each of the strings in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectStr(collector func(int, string) interface{}) *Value {
+
+ arr := v.MustStrSlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachStr(func(index int, val string) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Int (int and []int)
+ --------------------------------------------------
+*/
+
+// Int gets the value as an int, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int(optionalDefault ...int) int {
+ if s, ok := v.data.(int); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustInt gets the value as an int.
+//
+// Panics if the object is not an int.
+func (v *Value) MustInt() int {
+ return v.data.(int)
+}
+
+// IntSlice gets the value as a []int, returns the optionalDefault
+// value or nil if the value is not a []int.
+func (v *Value) IntSlice(optionalDefault ...[]int) []int {
+ if s, ok := v.data.([]int); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustIntSlice gets the value as a []int.
+//
+// Panics if the object is not a []int.
+func (v *Value) MustIntSlice() []int {
+ return v.data.([]int)
+}
+
+// IsInt gets whether the object contained is an int or not.
+func (v *Value) IsInt() bool {
+ _, ok := v.data.(int)
+ return ok
+}
+
+// IsIntSlice gets whether the object contained is a []int or not.
+func (v *Value) IsIntSlice() bool {
+ _, ok := v.data.([]int)
+ return ok
+}
+
+// EachInt calls the specified callback for each object
+// in the []int.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt(callback func(int, int) bool) *Value {
+
+ for index, val := range v.MustIntSlice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereInt uses the specified decider function to select items
+// from the []int. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereInt(decider func(int, int) bool) *Value {
+
+ var selected []int
+
+ v.EachInt(func(index int, val int) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupInt uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int.
+func (v *Value) GroupInt(grouper func(int, int) string) *Value {
+
+ groups := make(map[string][]int)
+
+ v.EachInt(func(index int, val int) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]int, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceInt uses the specified function to replace each int
+// by iterating over each item. The data in the returned result will be a
+// []int containing the replaced items.
+func (v *Value) ReplaceInt(replacer func(int, int) int) *Value {
+
+ arr := v.MustIntSlice()
+ replaced := make([]int, len(arr))
+
+ v.EachInt(func(index int, val int) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectInt uses the specified collector function to collect a value
+// for each of the ints in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt(collector func(int, int) interface{}) *Value {
+
+ arr := v.MustIntSlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachInt(func(index int, val int) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Int8 (int8 and []int8)
+ --------------------------------------------------
+*/
+
+// Int8 gets the value as an int8, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int8(optionalDefault ...int8) int8 {
+ if s, ok := v.data.(int8); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustInt8 gets the value as an int8.
+//
+// Panics if the object is not an int8.
+func (v *Value) MustInt8() int8 {
+ return v.data.(int8)
+}
+
+// Int8Slice gets the value as a []int8, returns the optionalDefault
+// value or nil if the value is not a []int8.
+func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 {
+ if s, ok := v.data.([]int8); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInt8Slice gets the value as a []int8.
+//
+// Panics if the object is not a []int8.
+func (v *Value) MustInt8Slice() []int8 {
+ return v.data.([]int8)
+}
+
+// IsInt8 gets whether the object contained is an int8 or not.
+func (v *Value) IsInt8() bool {
+ _, ok := v.data.(int8)
+ return ok
+}
+
+// IsInt8Slice gets whether the object contained is a []int8 or not.
+func (v *Value) IsInt8Slice() bool {
+ _, ok := v.data.([]int8)
+ return ok
+}
+
+// EachInt8 calls the specified callback for each object
+// in the []int8.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt8(callback func(int, int8) bool) *Value {
+
+ for index, val := range v.MustInt8Slice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereInt8 uses the specified decider function to select items
+// from the []int8. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereInt8(decider func(int, int8) bool) *Value {
+
+ var selected []int8
+
+ v.EachInt8(func(index int, val int8) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupInt8 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int8.
+func (v *Value) GroupInt8(grouper func(int, int8) string) *Value {
+
+ groups := make(map[string][]int8)
+
+ v.EachInt8(func(index int, val int8) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]int8, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceInt8 uses the specified function to replace each int8
+// by iterating over each item. The data in the returned result will be a
+// []int8 containing the replaced items.
+func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value {
+
+ arr := v.MustInt8Slice()
+ replaced := make([]int8, len(arr))
+
+ v.EachInt8(func(index int, val int8) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectInt8 uses the specified collector function to collect a value
+// for each of the int8s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value {
+
+ arr := v.MustInt8Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachInt8(func(index int, val int8) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Int16 (int16 and []int16)
+ --------------------------------------------------
+*/
+
+// Int16 gets the value as an int16, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int16(optionalDefault ...int16) int16 {
+ if s, ok := v.data.(int16); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustInt16 gets the value as an int16.
+//
+// Panics if the object is not an int16.
+func (v *Value) MustInt16() int16 {
+ return v.data.(int16)
+}
+
+// Int16Slice gets the value as a []int16, returns the optionalDefault
+// value or nil if the value is not a []int16.
+func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 {
+ if s, ok := v.data.([]int16); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInt16Slice gets the value as a []int16.
+//
+// Panics if the object is not a []int16.
+func (v *Value) MustInt16Slice() []int16 {
+ return v.data.([]int16)
+}
+
+// IsInt16 gets whether the object contained is an int16 or not.
+func (v *Value) IsInt16() bool {
+ _, ok := v.data.(int16)
+ return ok
+}
+
+// IsInt16Slice gets whether the object contained is a []int16 or not.
+func (v *Value) IsInt16Slice() bool {
+ _, ok := v.data.([]int16)
+ return ok
+}
+
+// EachInt16 calls the specified callback for each object
+// in the []int16.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt16(callback func(int, int16) bool) *Value {
+
+ for index, val := range v.MustInt16Slice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereInt16 uses the specified decider function to select items
+// from the []int16. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereInt16(decider func(int, int16) bool) *Value {
+
+ var selected []int16
+
+ v.EachInt16(func(index int, val int16) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupInt16 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int16.
+func (v *Value) GroupInt16(grouper func(int, int16) string) *Value {
+
+ groups := make(map[string][]int16)
+
+ v.EachInt16(func(index int, val int16) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]int16, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceInt16 uses the specified function to replace each int16
+// by iterating over each item. The data in the returned result will be a
+// []int16 containing the replaced items.
+func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value {
+
+ arr := v.MustInt16Slice()
+ replaced := make([]int16, len(arr))
+
+ v.EachInt16(func(index int, val int16) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectInt16 uses the specified collector function to collect a value
+// for each of the int16s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value {
+
+ arr := v.MustInt16Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachInt16(func(index int, val int16) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Int32 (int32 and []int32)
+ --------------------------------------------------
+*/
+
+// Int32 gets the value as an int32, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int32(optionalDefault ...int32) int32 {
+ if s, ok := v.data.(int32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustInt32 gets the value as an int32.
+//
+// Panics if the object is not an int32.
+func (v *Value) MustInt32() int32 {
+ return v.data.(int32)
+}
+
+// Int32Slice gets the value as a []int32, returns the optionalDefault
+// value or nil if the value is not a []int32.
+func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 {
+ if s, ok := v.data.([]int32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInt32Slice gets the value as a []int32.
+//
+// Panics if the object is not a []int32.
+func (v *Value) MustInt32Slice() []int32 {
+ return v.data.([]int32)
+}
+
+// IsInt32 gets whether the object contained is an int32 or not.
+func (v *Value) IsInt32() bool {
+ _, ok := v.data.(int32)
+ return ok
+}
+
+// IsInt32Slice gets whether the object contained is a []int32 or not.
+func (v *Value) IsInt32Slice() bool {
+ _, ok := v.data.([]int32)
+ return ok
+}
+
+// EachInt32 calls the specified callback for each object
+// in the []int32.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt32(callback func(int, int32) bool) *Value {
+
+ for index, val := range v.MustInt32Slice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereInt32 uses the specified decider function to select items
+// from the []int32. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereInt32(decider func(int, int32) bool) *Value {
+
+ var selected []int32
+
+ v.EachInt32(func(index int, val int32) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupInt32 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int32.
+func (v *Value) GroupInt32(grouper func(int, int32) string) *Value {
+
+ groups := make(map[string][]int32)
+
+ v.EachInt32(func(index int, val int32) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]int32, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceInt32 uses the specified function to replace each int32
+// by iterating over each item. The data in the returned result will be a
+// []int32 containing the replaced items.
+func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value {
+
+ arr := v.MustInt32Slice()
+ replaced := make([]int32, len(arr))
+
+ v.EachInt32(func(index int, val int32) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectInt32 uses the specified collector function to collect a value
+// for each of the int32s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value {
+
+ arr := v.MustInt32Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachInt32(func(index int, val int32) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Int64 (int64 and []int64)
+ --------------------------------------------------
+*/
+
+// Int64 gets the value as an int64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Int64(optionalDefault ...int64) int64 {
+ if s, ok := v.data.(int64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustInt64 gets the value as an int64.
+//
+// Panics if the object is not an int64.
+func (v *Value) MustInt64() int64 {
+ return v.data.(int64)
+}
+
+// Int64Slice gets the value as a []int64, returns the optionalDefault
+// value or nil if the value is not a []int64.
+func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 {
+ if s, ok := v.data.([]int64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustInt64Slice gets the value as a []int64.
+//
+// Panics if the object is not a []int64.
+func (v *Value) MustInt64Slice() []int64 {
+ return v.data.([]int64)
+}
+
+// IsInt64 gets whether the object contained is an int64 or not.
+func (v *Value) IsInt64() bool {
+ _, ok := v.data.(int64)
+ return ok
+}
+
+// IsInt64Slice gets whether the object contained is a []int64 or not.
+func (v *Value) IsInt64Slice() bool {
+ _, ok := v.data.([]int64)
+ return ok
+}
+
+// EachInt64 calls the specified callback for each object
+// in the []int64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachInt64(callback func(int, int64) bool) *Value {
+
+ for index, val := range v.MustInt64Slice() {
+ carryon := callback(index, val)
+		if !carryon {
+ break
+ }
+ }
+
+ return v
+
+}
+
+// WhereInt64 uses the specified decider function to select items
+// from the []int64. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereInt64(decider func(int, int64) bool) *Value {
+
+ var selected []int64
+
+ v.EachInt64(func(index int, val int64) bool {
+ shouldSelect := decider(index, val)
+		if shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupInt64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]int64.
+func (v *Value) GroupInt64(grouper func(int, int64) string) *Value {
+
+ groups := make(map[string][]int64)
+
+ v.EachInt64(func(index int, val int64) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]int64, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceInt64 uses the specified function to replace each int64
+// by iterating over each item. The data in the returned result will be a
+// []int64 containing the replaced items.
+func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value {
+
+ arr := v.MustInt64Slice()
+ replaced := make([]int64, len(arr))
+
+ v.EachInt64(func(index int, val int64) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectInt64 uses the specified collector function to collect a value
+// for each of the int64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value {
+
+ arr := v.MustInt64Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachInt64(func(index int, val int64) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Uint (uint and []uint)
+ --------------------------------------------------
+*/
+
+// Uint gets the value as a uint, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint(optionalDefault ...uint) uint {
+ if s, ok := v.data.(uint); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustUint gets the value as a uint.
+//
+// Panics if the object is not a uint.
+func (v *Value) MustUint() uint {
+ return v.data.(uint)
+}
+
+// UintSlice gets the value as a []uint, returns the optionalDefault
+// value or nil if the value is not a []uint.
+func (v *Value) UintSlice(optionalDefault ...[]uint) []uint {
+ if s, ok := v.data.([]uint); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustUintSlice gets the value as a []uint.
+//
+// Panics if the object is not a []uint.
+func (v *Value) MustUintSlice() []uint {
+ return v.data.([]uint)
+}
+
+// IsUint gets whether the object contained is a uint or not.
+func (v *Value) IsUint() bool {
+ _, ok := v.data.(uint)
+ return ok
+}
+
+// IsUintSlice gets whether the object contained is a []uint or not.
+func (v *Value) IsUintSlice() bool {
+ _, ok := v.data.([]uint)
+ return ok
+}
+
+// EachUint calls the specified callback for each object
+// in the []uint.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint(callback func(int, uint) bool) *Value {
+
+ for index, val := range v.MustUintSlice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereUint uses the specified decider function to select items
+// from the []uint. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereUint(decider func(int, uint) bool) *Value {
+
+ var selected []uint
+
+ v.EachUint(func(index int, val uint) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupUint uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint.
+func (v *Value) GroupUint(grouper func(int, uint) string) *Value {
+
+ groups := make(map[string][]uint)
+
+ v.EachUint(func(index int, val uint) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]uint, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceUint uses the specified function to replace each uint
+// by iterating over each item. The data in the returned result will be a
+// []uint containing the replaced items.
+func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value {
+
+ arr := v.MustUintSlice()
+ replaced := make([]uint, len(arr))
+
+ v.EachUint(func(index int, val uint) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectUint uses the specified collector function to collect a value
+// for each of the uints in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value {
+
+ arr := v.MustUintSlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachUint(func(index int, val uint) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Uint8 (uint8 and []uint8)
+ --------------------------------------------------
+*/
+
+// Uint8 gets the value as a uint8, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint8(optionalDefault ...uint8) uint8 {
+ if s, ok := v.data.(uint8); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustUint8 gets the value as a uint8.
+//
+// Panics if the object is not a uint8.
+func (v *Value) MustUint8() uint8 {
+ return v.data.(uint8)
+}
+
+// Uint8Slice gets the value as a []uint8, returns the optionalDefault
+// value or nil if the value is not a []uint8.
+func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 {
+ if s, ok := v.data.([]uint8); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustUint8Slice gets the value as a []uint8.
+//
+// Panics if the object is not a []uint8.
+func (v *Value) MustUint8Slice() []uint8 {
+ return v.data.([]uint8)
+}
+
+// IsUint8 gets whether the object contained is a uint8 or not.
+func (v *Value) IsUint8() bool {
+ _, ok := v.data.(uint8)
+ return ok
+}
+
+// IsUint8Slice gets whether the object contained is a []uint8 or not.
+func (v *Value) IsUint8Slice() bool {
+ _, ok := v.data.([]uint8)
+ return ok
+}
+
+// EachUint8 calls the specified callback for each object
+// in the []uint8.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint8(callback func(int, uint8) bool) *Value {
+
+ for index, val := range v.MustUint8Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereUint8 uses the specified decider function to select items
+// from the []uint8. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value {
+
+ var selected []uint8
+
+ v.EachUint8(func(index int, val uint8) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupUint8 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint8.
+func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value {
+
+ groups := make(map[string][]uint8)
+
+ v.EachUint8(func(index int, val uint8) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]uint8, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceUint8 uses the specified function to replace each uint8
+// by iterating over each item. The data in the returned result will be a
+// []uint8 containing the replaced items.
+func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value {
+
+ arr := v.MustUint8Slice()
+ replaced := make([]uint8, len(arr))
+
+ v.EachUint8(func(index int, val uint8) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectUint8 uses the specified collector function to collect a value
+// for each of the uint8s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value {
+
+ arr := v.MustUint8Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachUint8(func(index int, val uint8) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Uint16 (uint16 and []uint16)
+ --------------------------------------------------
+*/
+
+// Uint16 gets the value as a uint16, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint16(optionalDefault ...uint16) uint16 {
+ if s, ok := v.data.(uint16); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustUint16 gets the value as a uint16.
+//
+// Panics if the object is not a uint16.
+func (v *Value) MustUint16() uint16 {
+ return v.data.(uint16)
+}
+
+// Uint16Slice gets the value as a []uint16, returns the optionalDefault
+// value or nil if the value is not a []uint16.
+func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 {
+ if s, ok := v.data.([]uint16); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustUint16Slice gets the value as a []uint16.
+//
+// Panics if the object is not a []uint16.
+func (v *Value) MustUint16Slice() []uint16 {
+ return v.data.([]uint16)
+}
+
+// IsUint16 gets whether the object contained is a uint16 or not.
+func (v *Value) IsUint16() bool {
+ _, ok := v.data.(uint16)
+ return ok
+}
+
+// IsUint16Slice gets whether the object contained is a []uint16 or not.
+func (v *Value) IsUint16Slice() bool {
+ _, ok := v.data.([]uint16)
+ return ok
+}
+
+// EachUint16 calls the specified callback for each object
+// in the []uint16.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint16(callback func(int, uint16) bool) *Value {
+
+ for index, val := range v.MustUint16Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereUint16 uses the specified decider function to select items
+// from the []uint16. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value {
+
+ var selected []uint16
+
+ v.EachUint16(func(index int, val uint16) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupUint16 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint16.
+func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value {
+
+ groups := make(map[string][]uint16)
+
+ v.EachUint16(func(index int, val uint16) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]uint16, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceUint16 uses the specified function to replace each uint16
+// by iterating over each item. The data in the returned result will be a
+// []uint16 containing the replaced items.
+func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value {
+
+ arr := v.MustUint16Slice()
+ replaced := make([]uint16, len(arr))
+
+ v.EachUint16(func(index int, val uint16) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectUint16 uses the specified collector function to collect a value
+// for each of the uint16s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value {
+
+ arr := v.MustUint16Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachUint16(func(index int, val uint16) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Uint32 (uint32 and []uint32)
+ --------------------------------------------------
+*/
+
+// Uint32 gets the value as a uint32, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint32(optionalDefault ...uint32) uint32 {
+ if s, ok := v.data.(uint32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustUint32 gets the value as a uint32.
+//
+// Panics if the object is not a uint32.
+func (v *Value) MustUint32() uint32 {
+ return v.data.(uint32)
+}
+
+// Uint32Slice gets the value as a []uint32, returns the optionalDefault
+// value or nil if the value is not a []uint32.
+func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 {
+ if s, ok := v.data.([]uint32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustUint32Slice gets the value as a []uint32.
+//
+// Panics if the object is not a []uint32.
+func (v *Value) MustUint32Slice() []uint32 {
+ return v.data.([]uint32)
+}
+
+// IsUint32 gets whether the object contained is a uint32 or not.
+func (v *Value) IsUint32() bool {
+ _, ok := v.data.(uint32)
+ return ok
+}
+
+// IsUint32Slice gets whether the object contained is a []uint32 or not.
+func (v *Value) IsUint32Slice() bool {
+ _, ok := v.data.([]uint32)
+ return ok
+}
+
+// EachUint32 calls the specified callback for each object
+// in the []uint32.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint32(callback func(int, uint32) bool) *Value {
+
+ for index, val := range v.MustUint32Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereUint32 uses the specified decider function to select items
+// from the []uint32. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value {
+
+ var selected []uint32
+
+ v.EachUint32(func(index int, val uint32) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupUint32 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint32.
+func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value {
+
+ groups := make(map[string][]uint32)
+
+ v.EachUint32(func(index int, val uint32) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]uint32, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceUint32 uses the specified function to replace each uint32
+// by iterating over each item. The data in the returned result will be a
+// []uint32 containing the replaced items.
+func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value {
+
+ arr := v.MustUint32Slice()
+ replaced := make([]uint32, len(arr))
+
+ v.EachUint32(func(index int, val uint32) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectUint32 uses the specified collector function to collect a value
+// for each of the uint32s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value {
+
+ arr := v.MustUint32Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachUint32(func(index int, val uint32) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Uint64 (uint64 and []uint64)
+ --------------------------------------------------
+*/
+
+// Uint64 gets the value as a uint64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uint64(optionalDefault ...uint64) uint64 {
+ if s, ok := v.data.(uint64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustUint64 gets the value as a uint64.
+//
+// Panics if the object is not a uint64.
+func (v *Value) MustUint64() uint64 {
+ return v.data.(uint64)
+}
+
+// Uint64Slice gets the value as a []uint64, returns the optionalDefault
+// value or nil if the value is not a []uint64.
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 {
+ if s, ok := v.data.([]uint64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustUint64Slice gets the value as a []uint64.
+//
+// Panics if the object is not a []uint64.
+func (v *Value) MustUint64Slice() []uint64 {
+ return v.data.([]uint64)
+}
+
+// IsUint64 gets whether the object contained is a uint64 or not.
+func (v *Value) IsUint64() bool {
+ _, ok := v.data.(uint64)
+ return ok
+}
+
+// IsUint64Slice gets whether the object contained is a []uint64 or not.
+func (v *Value) IsUint64Slice() bool {
+ _, ok := v.data.([]uint64)
+ return ok
+}
+
+// EachUint64 calls the specified callback for each object
+// in the []uint64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUint64(callback func(int, uint64) bool) *Value {
+
+ for index, val := range v.MustUint64Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereUint64 uses the specified decider function to select items
+// from the []uint64. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value {
+
+ var selected []uint64
+
+ v.EachUint64(func(index int, val uint64) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupUint64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uint64.
+func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value {
+
+ groups := make(map[string][]uint64)
+
+ v.EachUint64(func(index int, val uint64) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]uint64, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceUint64 uses the specified function to replace each uint64
+// by iterating over each item. The data in the returned result will be a
+// []uint64 containing the replaced items.
+func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value {
+
+ arr := v.MustUint64Slice()
+ replaced := make([]uint64, len(arr))
+
+ v.EachUint64(func(index int, val uint64) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectUint64 uses the specified collector function to collect a value
+// for each of the uint64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value {
+
+ arr := v.MustUint64Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachUint64(func(index int, val uint64) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Uintptr (uintptr and []uintptr)
+ --------------------------------------------------
+*/
+
+// Uintptr gets the value as a uintptr, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr {
+ if s, ok := v.data.(uintptr); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustUintptr gets the value as a uintptr.
+//
+// Panics if the object is not a uintptr.
+func (v *Value) MustUintptr() uintptr {
+ return v.data.(uintptr)
+}
+
+// UintptrSlice gets the value as a []uintptr, returns the optionalDefault
+// value or nil if the value is not a []uintptr.
+func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr {
+ if s, ok := v.data.([]uintptr); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustUintptrSlice gets the value as a []uintptr.
+//
+// Panics if the object is not a []uintptr.
+func (v *Value) MustUintptrSlice() []uintptr {
+ return v.data.([]uintptr)
+}
+
+// IsUintptr gets whether the object contained is a uintptr or not.
+func (v *Value) IsUintptr() bool {
+ _, ok := v.data.(uintptr)
+ return ok
+}
+
+// IsUintptrSlice gets whether the object contained is a []uintptr or not.
+func (v *Value) IsUintptrSlice() bool {
+ _, ok := v.data.([]uintptr)
+ return ok
+}
+
+// EachUintptr calls the specified callback for each object
+// in the []uintptr.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value {
+
+ for index, val := range v.MustUintptrSlice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereUintptr uses the specified decider function to select items
+// from the []uintptr. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value {
+
+ var selected []uintptr
+
+ v.EachUintptr(func(index int, val uintptr) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupUintptr uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]uintptr.
+func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value {
+
+ groups := make(map[string][]uintptr)
+
+ v.EachUintptr(func(index int, val uintptr) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]uintptr, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceUintptr uses the specified function to replace each uintptr
+// by iterating over each item. The data in the returned result will be a
+// []uintptr containing the replaced items.
+func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value {
+
+ arr := v.MustUintptrSlice()
+ replaced := make([]uintptr, len(arr))
+
+ v.EachUintptr(func(index int, val uintptr) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectUintptr uses the specified collector function to collect a value
+// for each of the uintptrs in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value {
+
+ arr := v.MustUintptrSlice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachUintptr(func(index int, val uintptr) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Float32 (float32 and []float32)
+ --------------------------------------------------
+*/
+
+// Float32 gets the value as a float32, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Float32(optionalDefault ...float32) float32 {
+ if s, ok := v.data.(float32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustFloat32 gets the value as a float32.
+//
+// Panics if the object is not a float32.
+func (v *Value) MustFloat32() float32 {
+ return v.data.(float32)
+}
+
+// Float32Slice gets the value as a []float32, returns the optionalDefault
+// value or nil if the value is not a []float32.
+func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 {
+ if s, ok := v.data.([]float32); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustFloat32Slice gets the value as a []float32.
+//
+// Panics if the object is not a []float32.
+func (v *Value) MustFloat32Slice() []float32 {
+ return v.data.([]float32)
+}
+
+// IsFloat32 gets whether the object contained is a float32 or not.
+func (v *Value) IsFloat32() bool {
+ _, ok := v.data.(float32)
+ return ok
+}
+
+// IsFloat32Slice gets whether the object contained is a []float32 or not.
+func (v *Value) IsFloat32Slice() bool {
+ _, ok := v.data.([]float32)
+ return ok
+}
+
+// EachFloat32 calls the specified callback for each object
+// in the []float32.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachFloat32(callback func(int, float32) bool) *Value {
+
+ for index, val := range v.MustFloat32Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereFloat32 uses the specified decider function to select items
+// from the []float32. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value {
+
+ var selected []float32
+
+ v.EachFloat32(func(index int, val float32) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupFloat32 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]float32.
+func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value {
+
+ groups := make(map[string][]float32)
+
+ v.EachFloat32(func(index int, val float32) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]float32, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceFloat32 uses the specified function to replace each float32
+// by iterating over each item. The data in the returned result will be a
+// []float32 containing the replaced items.
+func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value {
+
+ arr := v.MustFloat32Slice()
+ replaced := make([]float32, len(arr))
+
+ v.EachFloat32(func(index int, val float32) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectFloat32 uses the specified collector function to collect a value
+// for each of the float32s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value {
+
+ arr := v.MustFloat32Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachFloat32(func(index int, val float32) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Float64 (float64 and []float64)
+ --------------------------------------------------
+*/
+
+// Float64 gets the value as a float64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Float64(optionalDefault ...float64) float64 {
+ if s, ok := v.data.(float64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustFloat64 gets the value as a float64.
+//
+// Panics if the object is not a float64.
+func (v *Value) MustFloat64() float64 {
+ return v.data.(float64)
+}
+
+// Float64Slice gets the value as a []float64, returns the optionalDefault
+// value or nil if the value is not a []float64.
+func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 {
+ if s, ok := v.data.([]float64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustFloat64Slice gets the value as a []float64.
+//
+// Panics if the object is not a []float64.
+func (v *Value) MustFloat64Slice() []float64 {
+ return v.data.([]float64)
+}
+
+// IsFloat64 gets whether the object contained is a float64 or not.
+func (v *Value) IsFloat64() bool {
+ _, ok := v.data.(float64)
+ return ok
+}
+
+// IsFloat64Slice gets whether the object contained is a []float64 or not.
+func (v *Value) IsFloat64Slice() bool {
+ _, ok := v.data.([]float64)
+ return ok
+}
+
+// EachFloat64 calls the specified callback for each object
+// in the []float64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachFloat64(callback func(int, float64) bool) *Value {
+
+ for index, val := range v.MustFloat64Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereFloat64 uses the specified decider function to select items
+// from the []float64. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value {
+
+ var selected []float64
+
+ v.EachFloat64(func(index int, val float64) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupFloat64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]float64.
+func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value {
+
+ groups := make(map[string][]float64)
+
+ v.EachFloat64(func(index int, val float64) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]float64, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceFloat64 uses the specified function to replace each float64
+// by iterating over each item. The data in the returned result will be a
+// []float64 containing the replaced items.
+func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value {
+
+ arr := v.MustFloat64Slice()
+ replaced := make([]float64, len(arr))
+
+ v.EachFloat64(func(index int, val float64) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectFloat64 uses the specified collector function to collect a value
+// for each of the float64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value {
+
+ arr := v.MustFloat64Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachFloat64(func(index int, val float64) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Complex64 (complex64 and []complex64)
+ --------------------------------------------------
+*/
+
+// Complex64 gets the value as a complex64, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Complex64(optionalDefault ...complex64) complex64 {
+ if s, ok := v.data.(complex64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustComplex64 gets the value as a complex64.
+//
+// Panics if the object is not a complex64.
+func (v *Value) MustComplex64() complex64 {
+ return v.data.(complex64)
+}
+
+// Complex64Slice gets the value as a []complex64, returns the optionalDefault
+// value or nil if the value is not a []complex64.
+func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 {
+ if s, ok := v.data.([]complex64); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustComplex64Slice gets the value as a []complex64.
+//
+// Panics if the object is not a []complex64.
+func (v *Value) MustComplex64Slice() []complex64 {
+ return v.data.([]complex64)
+}
+
+// IsComplex64 gets whether the object contained is a complex64 or not.
+func (v *Value) IsComplex64() bool {
+ _, ok := v.data.(complex64)
+ return ok
+}
+
+// IsComplex64Slice gets whether the object contained is a []complex64 or not.
+func (v *Value) IsComplex64Slice() bool {
+ _, ok := v.data.([]complex64)
+ return ok
+}
+
+// EachComplex64 calls the specified callback for each object
+// in the []complex64.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value {
+
+ for index, val := range v.MustComplex64Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereComplex64 uses the specified decider function to select items
+// from the []complex64. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value {
+
+ var selected []complex64
+
+ v.EachComplex64(func(index int, val complex64) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupComplex64 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]complex64.
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value {
+
+ groups := make(map[string][]complex64)
+
+ v.EachComplex64(func(index int, val complex64) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]complex64, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceComplex64 uses the specified function to replace each complex64
+// by iterating over each item. The data in the returned result will be a
+// []complex64 containing the replaced items.
+func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value {
+
+ arr := v.MustComplex64Slice()
+ replaced := make([]complex64, len(arr))
+
+ v.EachComplex64(func(index int, val complex64) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectComplex64 uses the specified collector function to collect a value
+// for each of the complex64s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value {
+
+ arr := v.MustComplex64Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachComplex64(func(index int, val complex64) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
+
+/*
+ Complex128 (complex128 and []complex128)
+ --------------------------------------------------
+*/
+
+// Complex128 gets the value as a complex128, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) Complex128(optionalDefault ...complex128) complex128 {
+ if s, ok := v.data.(complex128); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return 0
+}
+
+// MustComplex128 gets the value as a complex128.
+//
+// Panics if the object is not a complex128.
+func (v *Value) MustComplex128() complex128 {
+ return v.data.(complex128)
+}
+
+// Complex128Slice gets the value as a []complex128, returns the optionalDefault
+// value or nil if the value is not a []complex128.
+func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 {
+ if s, ok := v.data.([]complex128); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustComplex128Slice gets the value as a []complex128.
+//
+// Panics if the object is not a []complex128.
+func (v *Value) MustComplex128Slice() []complex128 {
+ return v.data.([]complex128)
+}
+
+// IsComplex128 gets whether the object contained is a complex128 or not.
+func (v *Value) IsComplex128() bool {
+ _, ok := v.data.(complex128)
+ return ok
+}
+
+// IsComplex128Slice gets whether the object contained is a []complex128 or not.
+func (v *Value) IsComplex128Slice() bool {
+ _, ok := v.data.([]complex128)
+ return ok
+}
+
+// EachComplex128 calls the specified callback for each object
+// in the []complex128.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value {
+
+ for index, val := range v.MustComplex128Slice() {
+		if carryon := callback(index, val); !carryon {
+			break
+		}
+ }
+
+ return v
+
+}
+
+// WhereComplex128 uses the specified decider function to select items
+// from the []complex128. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value {
+
+ var selected []complex128
+
+ v.EachComplex128(func(index int, val complex128) bool {
+		if shouldSelect := decider(index, val); shouldSelect {
+			selected = append(selected, val)
+		}
+ return true
+ })
+
+ return &Value{data: selected}
+
+}
+
+// GroupComplex128 uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]complex128.
+func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value {
+
+ groups := make(map[string][]complex128)
+
+ v.EachComplex128(func(index int, val complex128) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]complex128, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+
+ return &Value{data: groups}
+
+}
+
+// ReplaceComplex128 uses the specified function to replace each complex128
+// by iterating over each item. The data in the returned result will be a
+// []complex128 containing the replaced items.
+func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value {
+
+ arr := v.MustComplex128Slice()
+ replaced := make([]complex128, len(arr))
+
+ v.EachComplex128(func(index int, val complex128) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+
+ return &Value{data: replaced}
+
+}
+
+// CollectComplex128 uses the specified collector function to collect a value
+// for each of the complex128s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value {
+
+ arr := v.MustComplex128Slice()
+ collected := make([]interface{}, len(arr))
+
+ v.EachComplex128(func(index int, val complex128) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+
+ return &Value{data: collected}
+}
diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value.go b/Godeps/_workspace/src/github.com/stretchr/objx/value.go
new file mode 100644
index 0000000..7aaef06
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/objx/value.go
@@ -0,0 +1,13 @@
+package objx
+
+// Value provides methods for extracting interface{} data in various
+// types.
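+//
+// A minimal sketch (hypothetical construction; Values are normally built
+// elsewhere in the package):
+//
+//	v := &Value{data: []int64{1, 2, 3}}
+//	total := int64(0)
+//	v.EachInt64(func(i int, n int64) bool { total += n; return true })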
+type Value struct {
+ // data contains the raw data being managed by this Value
+ data interface{}
+}
+
+// Data returns the raw data contained by this Value
+func (v *Value) Data() interface{} {
+ return v.data
+}
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 0000000..8af3157
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,782 @@
+package assert
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+}
+
+// Comparison is a custom function that returns true on success and false on failure.
+type Comparison func() (success bool)
+
+/*
+ Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
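+//
+// For example (illustrative values):
+//
+//	ObjectsAreEqual("abc", "abc") // true via reflect.DeepEqual
+//	ObjectsAreEqual(1, "1")       // false: no equality or conversion applies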
+func ObjectsAreEqual(expected, actual interface{}) bool {
+
+ if expected == nil || actual == nil {
+ return expected == actual
+ }
+
+ if reflect.DeepEqual(expected, actual) {
+ return true
+ }
+
+ expectedValue := reflect.ValueOf(expected)
+ actualValue := reflect.ValueOf(actual)
+ if expectedValue == actualValue {
+ return true
+ }
+
+ // Attempt comparison after type conversion
+ if actualValue.Type().ConvertibleTo(expectedValue.Type()) && expectedValue == actualValue.Convert(expectedValue.Type()) {
+ return true
+ }
+
+ // Last ditch effort
+ if fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual) {
+ return true
+ }
+
+ return false
+
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns a string containing the file and line number of the assert call
+// that failed.
+func CallerInfo() string {
+
+ file := ""
+ line := 0
+ ok := false
+
+ for i := 0; ; i++ {
+ _, file, line, ok = runtime.Caller(i)
+ if !ok {
+ return ""
+ }
+ parts := strings.Split(file, "/")
+ dir := parts[len(parts)-2]
+ file = parts[len(parts)-1]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ break
+ }
+ }
+
+ return fmt.Sprintf("%s:%d", file, line)
+}
+
+// getWhitespaceString returns a string that is long enough to overwrite the default
+// output from the go testing framework.
+func getWhitespaceString() string {
+
+ _, file, line, ok := runtime.Caller(1)
+ if !ok {
+ return ""
+ }
+ parts := strings.Split(file, "/")
+ file = parts[len(parts)-1]
+
+ return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
+
+}
+
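+// messageFromMsgAndArgs builds the optional user message: no arguments
+// yield "", a single argument is used as the message itself (it must be a
+// string), and multiple arguments are passed through fmt.Sprintf, for
+// example (illustrative call):
+//
+//	messageFromMsgAndArgs("expected %d items", 3) // "expected 3 items"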
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+ if len(msgAndArgs) == 0 || msgAndArgs == nil {
+ return ""
+ }
+ if len(msgAndArgs) == 1 {
+ return msgAndArgs[0].(string)
+ }
+ if len(msgAndArgs) > 1 {
+ return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+ }
+ return ""
+}
+
+// Indents all lines of the message by prepending a number of tabs to each line, in an output format compatible with Go's
+// test printing (see inner comment for specifics)
+func indentMessageLines(message string, tabs int) string {
+ outBuf := new(bytes.Buffer)
+
+ for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ if i != 0 {
+ outBuf.WriteRune('\n')
+ }
+ for ii := 0; ii < tabs; ii++ {
+ outBuf.WriteRune('\t')
+			// Bizarrely, all lines except the first need one fewer tab prepended, so deliberately advance the counter
+			// by 1 prematurely.
+ if ii == 0 && i > 0 {
+ ii++
+ }
+ }
+ outBuf.WriteString(scanner.Text())
+ }
+
+ return outBuf.String()
+}
+
+// Fail reports a failure through the TestingT instance.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+
+ if len(message) > 0 {
+ t.Errorf("\r%s\r\tLocation:\t%s\n"+
+ "\r\tError:%s\n"+
+ "\r\tMessages:\t%s\n\r",
+ getWhitespaceString(),
+ CallerInfo(),
+ indentMessageLines(failureMessage, 2),
+ message)
+ } else {
+ t.Errorf("\r%s\r\tLocation:\t%s\n"+
+ "\r\tError:%s\n\r",
+ getWhitespaceString(),
+ CallerInfo(),
+ indentMessageLines(failureMessage, 2))
+ }
+
+ return false
+}
+
+// Implements asserts that the specified object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if !reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("Object must implement %v", interfaceType), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// IsType asserts that the specified objects are of the same type.
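+//
+//   assert.IsType(t, int64(0), myValue, "myValue should be an int64")
+//
+// (myValue above is a hypothetical variable under test.)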
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+ if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+ return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123, "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ if !ObjectsAreEqual(expected, actual) {
+ return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
+ " != %#v (actual)", expected, actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, "Types expected to match exactly", "%v != %v", aType, bType)
+ }
+
+ return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err, "err should be something")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+ success := true
+
+ if object == nil {
+ success = false
+ } else {
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+ success = false
+ }
+ }
+
+ if !success {
+ Fail(t, "Expected not to be nil.", msgAndArgs...)
+ }
+
+ return success
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+ return true
+ }
+
+ return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err, "err should be nothing")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if isNil(object) {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+var zeros = []interface{}{
+ int(0),
+ int8(0),
+ int16(0),
+ int32(0),
+ int64(0),
+ uint(0),
+ uint8(0),
+ uint16(0),
+ uint32(0),
+ uint64(0),
+ float32(0),
+ float64(0),
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+ if object == nil {
+ return true
+ } else if object == "" {
+ return true
+ } else if object == false {
+ return true
+ }
+
+ for _, v := range zeros {
+ if object == v {
+ return true
+ }
+ }
+
+ objValue := reflect.ValueOf(object)
+
+ switch objValue.Kind() {
+ case reflect.Map:
+ fallthrough
+ case reflect.Slice, reflect.Chan:
+ {
+ return (objValue.Len() == 0)
+ }
+ case reflect.Ptr:
+ {
+ switch object.(type) {
+ case *time.Time:
+ return object.(*time.Time).IsZero()
+ default:
+ return false
+ }
+ }
+ }
+ return false
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+ pass := isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+ pass := !isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+ v := reflect.ValueOf(x)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+ return true, v.Len()
+}
+
+// Len asserts that the specified object has the specified length.
+// Len also fails if the object has a type that the builtin len() does not accept.
+//
+// assert.Len(t, mySlice, 3, "The size of slice is not 3")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+ ok, l := getLen(object)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ }
+
+ if l != length {
+ return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ }
+ return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool, "myBool should be true")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+ if value != true {
+ return Fail(t, "Should be true", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+ if value != false {
+ return Fail(t, "Should be false", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ if ObjectsAreEqual(expected, actual) {
+ return Fail(t, "Should not be equal", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// includeElement loops over the list to check whether it includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+ listValue := reflect.ValueOf(list)
+ elementValue := reflect.ValueOf(element)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ found = false
+ }
+ }()
+
+ if reflect.TypeOf(list).Kind() == reflect.String {
+ return true, strings.Contains(listValue.String(), elementValue.String())
+ }
+
+ for i := 0; i < listValue.Len(); i++ {
+ if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+
+}
+
+// Contains asserts that the specified string or list(array, slice...) contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
+//   assert.Contains(t, ["Hello", "World"], "World", "But ['Hello', 'World'] does contain 'World'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotContains asserts that the specified string or list(array, slice...) does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if found {
+ return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+ result := comp()
+ if !result {
+ Fail(t, "Condition failed!", msgAndArgs...)
+ }
+ return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (bool, interface{}) {
+
+ didPanic := false
+ var message interface{}
+ func() {
+
+ defer func() {
+ if message = recover(); message != nil {
+ didPanic = true
+ }
+ }()
+
+ // call the target function
+ f()
+
+ }()
+
+ return didPanic, message
+
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){
+// GoCrazy()
+// }, "Calling GoCrazy() should panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+ if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){
+// RemainCalm()
+// }, "Calling RemainCalm() should NOT panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+ if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+
+ dt := expected.Sub(actual)
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
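+// toFloat converts a built-in numeric value to a float64. Types outside
+// the switch below (notably plain uint and the complex types) report
+// false, just like non-numeric values.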
+func toFloat(x interface{}) (float64, bool) {
+ var xf float64
+ xok := true
+
+ switch xn := x.(type) {
+ case uint8:
+ xf = float64(xn)
+ case uint16:
+ xf = float64(xn)
+ case uint32:
+ xf = float64(xn)
+ case uint64:
+ xf = float64(xn)
+ case int:
+ xf = float64(xn)
+ case int8:
+ xf = float64(xn)
+ case int16:
+ xf = float64(xn)
+ case int32:
+ xf = float64(xn)
+ case int64:
+ xf = float64(xn)
+ case float32:
+ xf = float64(xn)
+ case float64:
+ xf = float64(xn)
+ default:
+ xok = false
+ }
+
+ return xf, xok
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+		return Fail(t, "Parameters must be numerical", msgAndArgs...)
+ }
+
+ dt := af - bf
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+// min(|expected|, |actual|) * epsilon
+func calcEpsilonDelta(expected, actual interface{}, epsilon float64) float64 {
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+ // invalid input
+ return 0
+ }
+
+ if af < 0 {
+ af = -af
+ }
+ if bf < 0 {
+ bf = -bf
+ }
+ var delta float64
+ if af < bf {
+ delta = af * epsilon
+ } else {
+ delta = bf * epsilon
+ }
+ return delta
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+//
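+// For example, a 2% relative tolerance (illustrative values):
+//
+//   assert.InEpsilon(t, 100.0, 101.0, 0.02)
+//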
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ delta := calcEpsilonDelta(expected, actual, epsilon)
+
+ return InDelta(t, expected, actual, delta, msgAndArgs...)
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, actualObj, expectedObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if isNil(err) {
+ return true
+ }
+
+ return Fail(t, fmt.Sprintf("No error is expected but got %v", err), msgAndArgs...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err, "An error was expected") {
+// assert.Equal(t, err, expectedError)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ return NotNil(t, err, "An error is expected but got nil. %s", message)
+
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   assert.EqualError(t, err, expectedErrorString, "An error was expected")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
+ return false
+ }
+ s := "An error with value \"%s\" is expected but got \"%s\". %s"
+ return Equal(t, theError.Error(), errString,
+ s, errString, theError.Error(), message)
+}
+
+// matchRegexp return true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+ var r *regexp.Regexp
+ if rr, ok := rx.(*regexp.Regexp); ok {
+ r = rr
+ } else {
+ r = regexp.MustCompile(fmt.Sprint(rx))
+ }
+
+ return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexp(t TestingT, rx interface{}, str interface{}) bool {
+
+ match := matchRegexp(rx, str)
+
+	if !match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", rx, str))
+	}
+
+ return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexp(t TestingT, rx interface{}, str interface{}) bool {
+ match := matchRegexp(rx, str)
+
+	if match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", rx, str))
+	}
+
+ return !match
+
+}
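
Between them, the helpers above cover absolute tolerances (InDelta), relative tolerances (InEpsilon), and regexp matching. A minimal sketch of how they read in an ordinary test; the values and test name are illustrative:

```go
package example

import (
	"math"
	"regexp"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestToleranceAndRegexp(t *testing.T) {
	// InDelta is an absolute bound: |pi - 22/7| must not exceed 0.01.
	assert.InDelta(t, math.Pi, 22.0/7.0, 0.01)

	// InEpsilon is relative: the allowed delta is
	// min(|expected|, |actual|) * epsilon, so it scales with magnitude.
	assert.InEpsilon(t, 1000.0, 1001.0, 0.01)

	// Regexp accepts a *regexp.Regexp or anything fmt.Sprint can
	// render as a pattern string.
	assert.Regexp(t, regexp.MustCompile(`starting$`), "it's starting")
	assert.NotRegexp(t, "^start", "it's not starting")
}
```
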
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 0000000..1c6de28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,150 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// If you assert many times, use the form below:
+//
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+// assert := assert.New(t)
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(a, b, "The two words should be the same.")
+// }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+//
+// Here is an overview of the assert functions:
+//
+// assert.Equal(t, expected, actual [, message [, format-args]])
+//
+// assert.NotEqual(t, notExpected, actual [, message [, format-args]])
+//
+// assert.True(t, actualBool [, message [, format-args]])
+//
+// assert.False(t, actualBool [, message [, format-args]])
+//
+// assert.Nil(t, actualObject [, message [, format-args]])
+//
+// assert.NotNil(t, actualObject [, message [, format-args]])
+//
+// assert.Empty(t, actualObject [, message [, format-args]])
+//
+// assert.NotEmpty(t, actualObject [, message [, format-args]])
+//
+// assert.Len(t, actualObject, expectedLength [, message [, format-args]])
+//
+// assert.Error(t, errorObject [, message [, format-args]])
+//
+// assert.NoError(t, errorObject [, message [, format-args]])
+//
+// assert.EqualError(t, theError, errString [, message [, format-args]])
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject) [, message [, format-args]])
+//
+// assert.IsType(t, expectedObject, actualObject [, message [, format-args]])
+//
+// assert.Contains(t, stringOrSlice, substringOrElement [, message [, format-args]])
+//
+// assert.NotContains(t, stringOrSlice, substringOrElement [, message [, format-args]])
+//
+// assert.Panics(t, func(){
+//
+// // call code that should panic
+//
+// } [, message [, format-args]])
+//
+// assert.NotPanics(t, func(){
+//
+// // call code that should not panic
+//
+// } [, message [, format-args]])
+//
+// assert.WithinDuration(t, timeA, timeB, deltaTime [, message [, format-args]])
+//
+// assert.InDelta(t, numA, numB, delta [, message [, format-args]])
+//
+// assert.InEpsilon(t, numA, numB, epsilon [, message [, format-args]])
+//
+// The assert package also contains an Assertions type; create one with
+// assert.New(t) and its methods mirror the functions above without the
+// explicit t argument.
+//
+// Here is an overview of the Assertions methods:
+// assert.Equal(expected, actual [, message [, format-args]])
+//
+// assert.NotEqual(notExpected, actual [, message [, format-args]])
+//
+// assert.True(actualBool [, message [, format-args]])
+//
+// assert.False(actualBool [, message [, format-args]])
+//
+// assert.Nil(actualObject [, message [, format-args]])
+//
+// assert.NotNil(actualObject [, message [, format-args]])
+//
+// assert.Empty(actualObject [, message [, format-args]])
+//
+// assert.NotEmpty(actualObject [, message [, format-args]])
+//
+// assert.Len(actualObject, expectedLength [, message [, format-args]])
+//
+// assert.Error(errorObject [, message [, format-args]])
+//
+// assert.NoError(errorObject [, message [, format-args]])
+//
+// assert.EqualError(theError, errString [, message [, format-args]])
+//
+// assert.Implements((*MyInterface)(nil), new(MyObject) [, message [, format-args]])
+//
+// assert.IsType(expectedObject, actualObject [, message [, format-args]])
+//
+// assert.Contains(stringOrSlice, substringOrElement [, message [, format-args]])
+//
+// assert.NotContains(stringOrSlice, substringOrElement [, message [, format-args]])
+//
+// assert.Panics(func(){
+//
+// // call code that should panic
+//
+// } [, message [, format-args]])
+//
+// assert.NotPanics(func(){
+//
+// // call code that should not panic
+//
+// } [, message [, format-args]])
+//
+// assert.WithinDuration(timeA, timeB, deltaTime [, message [, format-args]])
+//
+// assert.InDelta(numA, numB, delta [, message [, format-args]])
+//
+// assert.InEpsilon(numA, numB, epsilon [, message [, format-args]])
+package assert
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 0000000..ac9dc9d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+ "errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
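
A short sketch of where AnError earns its keep: the test below only cares that an error propagates unchanged, not which one. The wrapper function is hypothetical:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// runAndWrap is a hypothetical function under test that passes its
// dependency's error straight through.
func runAndWrap(dep func() error) error {
	return dep()
}

func TestErrorPropagates(t *testing.T) {
	err := runAndWrap(func() error { return assert.AnError })

	// The specific error value is irrelevant; equality with AnError
	// just proves it was propagated unchanged.
	assert.Equal(t, assert.AnError, err)
}
```
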
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 0000000..e2866f8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,252 @@
+package assert
+
+import "time"
+
+// Assertions provides assertion methods around the TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+ return &Assertions{
+ t: t,
+ }
+}
+
+// Fail reports a failure through the underlying TestingT.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+ return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// Implements asserts that the specified object implements the given interface.
+//
+// assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(123, 123, "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(err, "err should be something")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+ return NotNil(a.t, object, msgAndArgs...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(err, "err should be nothing")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+ return Nil(a.t, object, msgAndArgs...)
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a
+// slice with len == 0.
+//
+// assert.Empty(obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+ return Empty(a.t, object, msgAndArgs...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a
+// slice with len == 0.
+//
+// if assert.NotEmpty(obj) {
+// assert.Equal("two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+ return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// assert.Len(mySlice, 3, "The size of slice is not 3")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+ return Len(a.t, object, length, msgAndArgs...)
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(myBool, "myBool should be true")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+ return True(a.t, value, msgAndArgs...)
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+ return False(a.t, value, msgAndArgs...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(obj1, obj2, "two objects shouldn't be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// Contains asserts that the specified string contains the specified substring.
+//
+// assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) bool {
+ return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContains asserts that the specified string does NOT contain the specified substring.
+//
+// assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) bool {
+ return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+ return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(func(){
+// GoCrazy()
+// }, "Calling GoCrazy() should panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ return Panics(a.t, f, msgAndArgs...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(func(){
+// RemainCalm()
+// }, "Calling RemainCalm() should NOT panic")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon.
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(err) {
+// assert.Equal(actualObj, expectedObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool {
+ return NoError(a.t, theError, msgAndArgs...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(err, "An error was expected") {
+// assert.Equal(err, expectedError)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool {
+ return Error(a.t, theError, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(err, "An error was expected") {
+// assert.Equal(err, expectedError)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+ return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexp(rx interface{}, str interface{}) bool {
+ return Regexp(a.t, rx, str)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}) bool {
+ return NotRegexp(a.t, rx, str)
+}
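
Every method above simply forwards to the package-level function with the stored t, which is the point of the type: bind t once with New and drop it from each call. A minimal sketch:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestWithForwardedAssertions(t *testing.T) {
	// New stores t; each method below forwards to the package-level
	// function of the same name with a.t as its first argument.
	a := assert.New(t)

	a.Equal("Hello", "Hello")
	a.Len([]string{"one", "two"}, 2)
	a.NotNil(a)
	a.Regexp(`^Hel`, "Hello")
}
```
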
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go
new file mode 100644
index 0000000..0bcb6db
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go
@@ -0,0 +1,157 @@
+package assert
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+)
+
+// httpCode is a helper that returns the HTTP code of the response. It returns -1
+// if building a new request fails.
+func httpCode(handler http.HandlerFunc, mode, url string, values url.Values) int {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil)
+ if err != nil {
+ return -1
+ }
+ handler(w, req)
+ return w.Code
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool {
+ code := httpCode(handler, mode, url, values)
+ if code == -1 {
+ return false
+ }
+ return code >= http.StatusOK && code <= http.StatusPartialContent
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool {
+ code := httpCode(handler, mode, url, values)
+ if code == -1 {
+ return false
+ }
+ return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool {
+ code := httpCode(handler, mode, url, values)
+ if code == -1 {
+ return false
+ }
+ return code >= http.StatusBadRequest
+}
+
+// HttpBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HttpBody(handler http.HandlerFunc, mode, url string, values url.Values) string {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil)
+ if err != nil {
+ return ""
+ }
+ handler(w, req)
+ return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
+ body := HttpBody(handler, mode, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if !contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
+ body := HttpBody(handler, mode, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return !contains
+}
+
+//
+// Assertions Wrappers
+//
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, mode, url string, values url.Values) bool {
+ return HTTPSuccess(a.t, handler, mode, url, values)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, mode, url string, values url.Values) bool {
+ return HTTPRedirect(a.t, handler, mode, url, values)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, mode, url string, values url.Values) bool {
+ return HTTPError(a.t, handler, mode, url, values)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
+ return HTTPBodyContains(a.t, handler, mode, url, values, str)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool {
+ return HTTPBodyNotContains(a.t, handler, mode, url, values, str)
+}
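
All of the HTTP helpers share the same mechanics: build a request from mode, url, and values, run the handler against an httptest recorder, and inspect the recorded code or body; nothing ever listens on a socket. A sketch with an illustrative handler:

```go
package example

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// greet is an illustrative handler; the URL given to the assertions is only
// used to construct the request, so any path works.
func greet(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "hello, %s", r.URL.Query().Get("name"))
}

func TestGreet(t *testing.T) {
	params := url.Values{"name": []string{"World"}}

	// 2xx check via the recorder's status code.
	assert.HTTPSuccess(t, greet, "GET", "/greet", params)

	// Body checks via the recorder's buffer.
	assert.HTTPBodyContains(t, greet, "GET", "/greet", params, "World")
	assert.HTTPBodyNotContains(t, greet, "GET", "/greet", params, "goodbye")
}
```
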
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go
new file mode 100644
index 0000000..7d4e7b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go
@@ -0,0 +1,43 @@
+// Package mock provides a system by which it is possible to mock your objects and verify calls are happening as expected.
+//
+// Example Usage
+//
+// The mock package provides an object, Mock, that tracks activity on another object. It is usually
+// embedded into a test object as shown below:
+//
+// type MyTestObject struct {
+// // add a Mock object instance
+// mock.Mock
+//
+// // other fields go here as normal
+// }
+//
+// When implementing the methods of an interface, you wire your functions up
+// to call the Mock.Called(args...) method, and return the appropriate values.
+//
+// For example, to mock a method that saves the name and age of a person and returns
+// the year of their birth or an error, you might write this:
+//
+// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
+// args := o.Mock.Called(firstname, lastname, age)
+// return args.Int(0), args.Error(1)
+// }
+//
+// The Int, Error and Bool methods are examples of strongly typed getters that take the argument
+// index position. Given this argument list:
+//
+// (12, true, "Something")
+//
+// You could read them out strongly typed like this:
+//
+// args.Int(0)
+// args.Bool(1)
+// args.String(2)
+//
+// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion:
+//
+// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine)
+//
+// This may cause a panic if the object you are getting is nil (the type assertion will fail); in those
+// cases you should check for nil first.
+package mock
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go
new file mode 100644
index 0000000..f73fa25
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go
@@ -0,0 +1,510 @@
+package mock
+
+import (
+ "fmt"
+ "github.com/stretchr/objx"
+ "github.com/stretchr/testify/assert"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Logf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+}
+
+/*
+ Call
+*/
+
+// Call represents a method call and is used for setting expectations,
+// as well as recording activity.
+type Call struct {
+
+ // The name of the method that was or will be called.
+ Method string
+
+ // Holds the arguments of the method.
+ Arguments Arguments
+
+ // Holds the arguments that should be returned when
+ // this method is called.
+ ReturnArguments Arguments
+
+ // The number of times to return the return arguments when setting
+ // expectations. 0 means to always return the value.
+ Repeatability int
+}
+
+// Mock is the workhorse used to track activity on another object.
+// For an example of its usage, refer to the "Example Usage" section at the top of this document.
+type Mock struct {
+
+ // The method name that is currently
+ // being referred to by the On method.
+ onMethodName string
+
+ // An array of the arguments that are
+ // currently being referred to by the On method.
+ onMethodArguments Arguments
+
+ // Represents the calls that are expected of
+ // an object.
+ ExpectedCalls []Call
+
+ // Holds the calls that were made to this mocked object.
+ Calls []Call
+
+	// TestData holds any data that might be useful for testing. Testify ignores
+	// this data completely, allowing you to do whatever you like with it.
+ testData objx.Map
+
+ mutex sync.Mutex
+}
+
+// TestData holds any data that might be useful for testing. Testify ignores
+// this data completely, allowing you to do whatever you like with it.
+func (m *Mock) TestData() objx.Map {
+
+ if m.testData == nil {
+ m.testData = make(objx.Map)
+ }
+
+ return m.testData
+}
+
+/*
+ Setting expectations
+*/
+
+// On starts a description of an expectation of the specified method
+// being called.
+//
+// Mock.On("MyMethod", arg1, arg2)
+func (m *Mock) On(methodName string, arguments ...interface{}) *Mock {
+ m.onMethodName = methodName
+ m.onMethodArguments = arguments
+ return m
+}
+
+// Return finishes a description of an expectation of the method (and arguments)
+// specified in the most recent On method call.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2)
+func (m *Mock) Return(returnArguments ...interface{}) *Mock {
+ m.ExpectedCalls = append(m.ExpectedCalls, Call{m.onMethodName, m.onMethodArguments, returnArguments, 0})
+ return m
+}
+
+// Once indicates that the mock should only return the value once.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
+func (m *Mock) Once() {
+ m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 1
+}
+
+// Twice indicates that the mock should only return the value twice.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice()
+func (m *Mock) Twice() {
+ m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 2
+}
+
+// Times indicates that the mock should only return the indicated number
+// of times.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5)
+func (m *Mock) Times(i int) {
+ m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = i
+}
+
+/*
+ Recording and responding to activity
+*/
+
+func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
+ for i, call := range m.ExpectedCalls {
+ if call.Method == method && call.Repeatability > -1 {
+
+ _, diffCount := call.Arguments.Diff(arguments)
+ if diffCount == 0 {
+ return i, &call
+ }
+
+ }
+ }
+ return -1, nil
+}
+
+func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) {
+
+	diffCount := 0
+	var closestCall *Call
+
+	for _, call := range m.ExpectedCalls {
+		if call.Method == method {
+
+			_, tempDiffCount := call.Arguments.Diff(arguments)
+			if tempDiffCount < diffCount || diffCount == 0 {
+				diffCount = tempDiffCount
+				// Copy the loop variable: taking &call directly would leave
+				// closestCall pointing at whatever the loop iterates last.
+				callCopy := call
+				closestCall = &callCopy
+			}
+
+ }
+ }
+
+ if closestCall == nil {
+ return false, nil
+ }
+
+ return true, closestCall
+}
+
+func callString(method string, arguments Arguments, includeArgumentValues bool) string {
+
+ var argValsString string = ""
+ if includeArgumentValues {
+ var argVals []string
+ for argIndex, arg := range arguments {
+ argVals = append(argVals, fmt.Sprintf("%d: %v", argIndex, arg))
+ }
+ argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t"))
+ }
+
+ return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString)
+}
+
+// Called tells the mock object that a method has been called, and gets an array
+// of arguments to return. Panics if the call is unexpected (i.e. not preceded by
+// appropriate .On(...).Return(...) calls)
+func (m *Mock) Called(arguments ...interface{}) Arguments {
+ defer m.mutex.Unlock()
+ m.mutex.Lock()
+
+ // get the calling function's name
+ pc, _, _, ok := runtime.Caller(1)
+ if !ok {
+ panic("Couldn't get the caller information")
+ }
+ functionPath := runtime.FuncForPC(pc).Name()
+ parts := strings.Split(functionPath, ".")
+ functionName := parts[len(parts)-1]
+
+ found, call := m.findExpectedCall(functionName, arguments...)
+
+ switch {
+ case found < 0:
+ // we have to fail here - because we don't know what to do
+ // as the return arguments. This is because:
+ //
+ // a) this is a totally unexpected call to this method,
+ // b) the arguments are not what was expected, or
+ // c) the developer has forgotten to add an accompanying On...Return pair.
+
+ closestFound, closestCall := m.findClosestCall(functionName, arguments...)
+
+ if closestFound {
+ panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true)))
+ } else {
+ panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo()))
+ }
+ case call.Repeatability == 1:
+ call.Repeatability = -1
+ m.ExpectedCalls[found] = *call
+ case call.Repeatability > 1:
+ call.Repeatability -= 1
+ m.ExpectedCalls[found] = *call
+ }
+
+ // add the call
+ m.Calls = append(m.Calls, Call{functionName, arguments, make([]interface{}, 0), 0})
+
+ return call.ReturnArguments
+
+}
+
+/*
+ Assertions
+*/
+
+// AssertExpectationsForObjects asserts that everything specified with On and Return
+// of the specified objects was in fact called as expected.
+//
+// Calls may have occurred in any order.
+func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
+ var success bool = true
+ for _, obj := range testObjects {
+ mockObj := obj.(Mock)
+ success = success && mockObj.AssertExpectations(t)
+ }
+ return success
+}
+
+// AssertExpectations asserts that everything specified with On and Return was
+// in fact called as expected. Calls may have occurred in any order.
+func (m *Mock) AssertExpectations(t TestingT) bool {
+
+ var somethingMissing bool = false
+ var failedExpectations int = 0
+
+ // iterate through each expectation
+ for _, expectedCall := range m.ExpectedCalls {
+ switch {
+ case !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments):
+ somethingMissing = true
+ failedExpectations++
+ t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
+ case expectedCall.Repeatability > 0:
+ somethingMissing = true
+ failedExpectations++
+ default:
+ t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
+ }
+ }
+
+ if somethingMissing {
+ t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(m.ExpectedCalls)-failedExpectations, len(m.ExpectedCalls), failedExpectations, assert.CallerInfo())
+ }
+
+ return !somethingMissing
+}
+
+// AssertNumberOfCalls asserts that the method was called expectedCalls times.
+func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool {
+ var actualCalls int = 0
+ for _, call := range m.Calls {
+ if call.Method == methodName {
+ actualCalls++
+ }
+ }
+ return assert.Equal(t, actualCalls, expectedCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls))
+}
+
+// AssertCalled asserts that the method was called.
+func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool {
+ if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) {
+ t.Logf("%s", m.ExpectedCalls)
+ return false
+ }
+ return true
+}
+
+// AssertNotCalled asserts that the method was not called.
+func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool {
+ if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) {
+ t.Logf("%s", m.ExpectedCalls)
+ return false
+ }
+ return true
+}
+
+func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool {
+ for _, call := range m.Calls {
+ if call.Method == methodName {
+
+ _, differences := Arguments(expected).Diff(call.Arguments)
+
+ if differences == 0 {
+ // found the expected call
+ return true
+ }
+
+ }
+ }
+ // we didn't find the expected call
+ return false
+}
+
+/*
+ Arguments
+*/
+
+// Arguments holds an array of method arguments or return values.
+type Arguments []interface{}
+
+const (
+ // The "any" argument. Used in Diff and Assert when
+ // the argument being tested shouldn't be taken into consideration.
+ Anything string = "mock.Anything"
+)
+
+// AnythingOfTypeArgument is a string that contains the type of an argument
+// for use when type checking. Used in Diff and Assert.
+type AnythingOfTypeArgument string
+
+// AnythingOfType returns an AnythingOfTypeArgument object containing the
+// name of the type to check for. Used in Diff and Assert.
+//
+// For example:
+// Assert(t, AnythingOfType("string"), AnythingOfType("int"))
+func AnythingOfType(t string) AnythingOfTypeArgument {
+ return AnythingOfTypeArgument(t)
+}
+
+// Get returns the argument at the specified index.
+func (args Arguments) Get(index int) interface{} {
+ if index+1 > len(args) {
+ panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args)))
+ }
+ return args[index]
+}
+
+// Is gets whether the objects match the arguments specified.
+func (args Arguments) Is(objects ...interface{}) bool {
+ for i, obj := range args {
+ if obj != objects[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Diff gets a string describing the differences between the arguments
+// and the specified objects.
+//
+// Returns the diff string and number of differences found.
+func (args Arguments) Diff(objects []interface{}) (string, int) {
+
+ var output string = "\n"
+ var differences int
+
+ var maxArgCount int = len(args)
+ if len(objects) > maxArgCount {
+ maxArgCount = len(objects)
+ }
+
+ for i := 0; i < maxArgCount; i++ {
+ var actual, expected interface{}
+
+ if len(objects) <= i {
+ actual = "(Missing)"
+ } else {
+ actual = objects[i]
+ }
+
+ if len(args) <= i {
+ expected = "(Missing)"
+ } else {
+ expected = args[i]
+ }
+
+ if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
+
+ // type checking
+ if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
+ // not match
+ differences++
+ output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual)
+ }
+
+ } else {
+
+ // normal checking
+
+ if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
+ // match
+ output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected)
+ } else {
+ // not match
+ differences++
+ output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected)
+ }
+ }
+
+ }
+
+ if differences == 0 {
+ return "No differences.", differences
+ }
+
+ return output, differences
+
+}
+
+// Assert compares the arguments with the specified objects and fails if
+// they do not exactly match.
+func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
+
+ // get the differences
+ diff, diffCount := args.Diff(objects)
+
+ if diffCount == 0 {
+ return true
+ }
+
+ // there are differences... report them...
+	t.Logf("%s", diff)
+ t.Errorf("%sArguments do not match.", assert.CallerInfo())
+
+ return false
+
+}
+
+// String gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+//
+// If no index is provided, String() returns a complete string representation
+// of the arguments.
+func (args Arguments) String(indexOrNil ...int) string {
+
+ if len(indexOrNil) == 0 {
+ // normal String() method - return a string representation of the args
+ var argsStr []string
+ for _, arg := range args {
+ argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg)))
+ }
+ return strings.Join(argsStr, ",")
+ } else if len(indexOrNil) == 1 {
+ // Index has been specified - get the argument at that index
+ var index int = indexOrNil[0]
+ var s string
+ var ok bool
+ if s, ok = args.Get(index).(string); !ok {
+ panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
+ }
+ return s
+ }
+
+ panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil)))
+
+}
+
+// Int gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+func (args Arguments) Int(index int) int {
+ var s int
+ var ok bool
+ if s, ok = args.Get(index).(int); !ok {
+ panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
+ }
+ return s
+}
+
+// Error gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+func (args Arguments) Error(index int) error {
+ obj := args.Get(index)
+ var s error
+ var ok bool
+ if obj == nil {
+ return nil
+ }
+ if s, ok = obj.(error); !ok {
+ panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
+ }
+ return s
+}
+
+// Bool gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+func (args Arguments) Bool(index int) bool {
+ var s bool
+ var ok bool
+ if s, ok = args.Get(index).(bool); !ok {
+ panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
+ }
+ return s
+}
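
A compact sketch tying the pieces of this file together: Called resolves the method name via runtime.Caller, matches it against the expectations registered by On/Return, and hands back the canned Arguments. The Fetcher type and names below are illustrative:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// mockFetcher embeds Mock and wires its one method through Called.
type mockFetcher struct {
	mock.Mock
}

func (m *mockFetcher) Fetch(id int) (string, error) {
	args := m.Mock.Called(id)
	return args.String(0), args.Error(1)
}

func TestFetch(t *testing.T) {
	f := new(mockFetcher)

	// Expect exactly one Fetch(42); Once sets Repeatability to 1, so a
	// second call would panic as unexpected.
	f.On("Fetch", 42).Return("payload", nil).Once()

	got, err := f.Fetch(42)
	if got != "payload" || err != nil {
		t.Fatalf("Fetch(42) = %q, %v", got, err)
	}

	// Both expectations and call counts are recorded on the Mock.
	f.AssertExpectations(t)
	f.AssertNumberOfCalls(t, "Fetch", 1)
}
```
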
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go
new file mode 100644
index 0000000..7b38438
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/require/doc.go
@@ -0,0 +1,77 @@
+// Package require provides alternative testing tools which stop test execution when a test fails.
+//
+// Example Usage
+//
+// The following is a complete example using require in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/require"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// require.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// Assertions
+//
+// The `require` package has the same global functions as the `assert` package,
+// but instead of returning a boolean result they call `t.FailNow()` on failure.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+//
+// Here is an overview of the require functions:
+//
+// require.Equal(t, expected, actual [, message [, format-args]])
+//
+// require.NotEqual(t, notExpected, actual [, message [, format-args]])
+//
+// require.True(t, actualBool [, message [, format-args]])
+//
+// require.False(t, actualBool [, message [, format-args]])
+//
+// require.Nil(t, actualObject [, message [, format-args]])
+//
+// require.NotNil(t, actualObject [, message [, format-args]])
+//
+// require.Empty(t, actualObject [, message [, format-args]])
+//
+// require.NotEmpty(t, actualObject [, message [, format-args]])
+//
+// require.Error(t, errorObject [, message [, format-args]])
+//
+// require.NoError(t, errorObject [, message [, format-args]])
+//
+// require.EqualError(t, theError, errString [, message [, format-args]])
+//
+// require.Implements(t, (*MyInterface)(nil), new(MyObject) [, message [, format-args]])
+//
+// require.IsType(t, expectedObject, actualObject [, message [, format-args]])
+//
+// require.Contains(t, string, substring [, message [, format-args]])
+//
+// require.NotContains(t, string, substring [, message [, format-args]])
+//
+// require.Panics(t, func(){
+//
+// // call code that should panic
+//
+// } [, message [, format-args]])
+//
+// require.NotPanics(t, func(){
+//
+// // call code that should not panic
+//
+// } [, message [, format-args]])
+//
+// require.WithinDuration(t, timeA, timeB, deltaTime [, message [, format-args]])
+//
+// require.InDelta(t, numA, numB, delta [, message [, format-args]])
+//
+// require.InEpsilon(t, numA, numB, epsilon [, message [, format-args]])
+package require
diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go b/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go
new file mode 100644
index 0000000..6744d8b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/stretchr/testify/require/requirements.go
@@ -0,0 +1,267 @@
+package require
+
+import (
+ "github.com/stretchr/testify/assert"
+ "time"
+)
+
+// TestingT is an interface wrapper around *testing.T.
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+ FailNow()
+}
+
+// FailNow reports a failure through assert.Fail and then stops test execution.
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
+ assert.Fail(t, failureMessage, msgAndArgs...)
+ t.FailNow()
+}
+
+// Implements asserts that the specified object implements the given interface.
+//
+// require.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.IsType(t, expectedType, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Equal asserts that two objects are equal.
+//
+// require.Equal(t, 123, 123, "123 and 123 should be equal")
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) {
+ if !assert.Equal(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// require.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) {
+ if !assert.Exactly(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// require.NotNil(t, err, "err should be something")
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotNil(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Nil asserts that the specified object is nil.
+//
+// require.Nil(t, err, "err should be nothing")
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.Nil(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// require.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.Empty(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// require.NotEmpty(t, obj)
+// require.Equal(t, "one", obj[0])
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotEmpty(t, object, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// True asserts that the specified value is true.
+//
+// require.True(t, myBool, "myBool should be true")
+func True(t TestingT, value bool, msgAndArgs ...interface{}) {
+ if !assert.True(t, value, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// False asserts that the specified value is false.
+//
+// require.False(t, myBool, "myBool should be false")
+func False(t TestingT, value bool, msgAndArgs ...interface{}) {
+ if !assert.False(t, value, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// require.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) {
+ if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Contains asserts that the specified string contains the specified substring.
+//
+// require.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
+func Contains(t TestingT, s, contains string, msgAndArgs ...interface{}) {
+ if !assert.Contains(t, s, contains, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotContains asserts that the specified string does NOT contain the specified substring.
+//
+// require.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
+func NotContains(t TestingT, s, contains string, msgAndArgs ...interface{}) {
+ if !assert.NotContains(t, s, contains, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
+ if !assert.Condition(t, comp, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// require.Panics(t, func(){
+// GoCrazy()
+// }, "Calling GoCrazy() should panic")
+func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if !assert.Panics(t, f, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// require.NotPanics(t, func(){
+// RemainCalm()
+// }, "Calling RemainCalm() should NOT panic")
+func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+ if !assert.NotPanics(t, f, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
+ if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// require.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon.
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+ if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// require.NoError(t, err)
+// require.Equal(t, actualObj, expectedObj)
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
+ if !assert.NoError(t, err, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// require.Error(t, err, "An error was expected")
+// require.Equal(t, err, expectedError)
+func Error(t TestingT, err error, msgAndArgs ...interface{}) {
+ if !assert.Error(t, err, msgAndArgs...) {
+ t.FailNow()
+ }
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// require.Error(t, err, "An error was expected")
+// require.Equal(t, err, expectedError)
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
+ if !assert.EqualError(t, theError, errString, msgAndArgs...) {
+ t.FailNow()
+ }
+}
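
The behavioral difference from assert is control flow: each wrapper above calls t.FailNow on failure, so statements after a failed require never run. A minimal sketch; the fixture path is illustrative:

```go
package example

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestFixture(t *testing.T) {
	f, err := os.Open("testdata/fixture.json") // illustrative path

	// On failure this calls t.FailNow, so f is never touched below
	// while nil; with assert.NoError the test would keep running.
	require.NoError(t, err)
	defer f.Close()

	info, err := f.Stat()
	require.NoError(t, err)
	require.NotEqual(t, int64(0), info.Size())
}
```
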
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE b/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE
new file mode 100644
index 0000000..80dd96d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2013 Suryandaru Triandana
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability.go b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability.go
new file mode 100644
index 0000000..c13f4e5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package capability provides utilities for manipulating POSIX capabilities.
+package capability
+
+type Capabilities interface {
+	// Get checks whether a capability is present in the given
+ // capabilities set. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE or BOUNDING.
+ Get(which CapType, what Cap) bool
+
+	// Empty checks whether all capability bits of the given capabilities
+ // set are zero. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE or BOUNDING.
+ Empty(which CapType) bool
+
+	// Full checks whether all capability bits of the given capabilities
+ // set are one. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE or BOUNDING.
+ Full(which CapType) bool
+
+ // Set sets capabilities of the given capabilities sets. The
+ // 'which' value should be one or combination (OR'ed) of EFFECTIVE,
+ // PERMITTED, INHERITABLE or BOUNDING.
+ Set(which CapType, caps ...Cap)
+
+ // Unset unsets capabilities of the given capabilities sets. The
+ // 'which' value should be one or combination (OR'ed) of EFFECTIVE,
+ // PERMITTED, INHERITABLE or BOUNDING.
+ Unset(which CapType, caps ...Cap)
+
+ // Fill sets all bits of the given capabilities kind to one. The
+ // 'kind' value should be one or combination (OR'ed) of CAPS or
+ // BOUNDS.
+ Fill(kind CapType)
+
+ // Clear sets all bits of the given capabilities kind to zero. The
+ // 'kind' value should be one or combination (OR'ed) of CAPS or
+ // BOUNDS.
+ Clear(kind CapType)
+
+	// StringCap returns the current capabilities state of the given capabilities
+	// set as a string. The 'which' value should be one of EFFECTIVE,
+ // PERMITTED, INHERITABLE or BOUNDING.
+ StringCap(which CapType) string
+
+	// String returns the current capabilities state as a string.
+ String() string
+
+	// Load loads the actual capabilities value. This will overwrite all
+ // outstanding changes.
+ Load() error
+
+	// Apply applies the capabilities settings, so all changes will take
+ // effect.
+ Apply(kind CapType) error
+}
+
+// NewPid creates a new initialized Capabilities object for the given pid when
+// it is nonzero, or for the current process when pid is 0.
+func NewPid(pid int) (Capabilities, error) {
+ return newPid(pid)
+}
+
+// NewFile creates a new initialized Capabilities object for the given named file.
+func NewFile(name string) (Capabilities, error) {
+ return newFile(name)
+}
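
A hedged sketch of the interface in use. It assumes the Cap and CapType constants (CAP_SYS_ADMIN, EFFECTIVE, and friends) defined elsewhere in this package, and it only does anything meaningful on Linux:

```go
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// Pid 0 selects the current process; version dispatch happens in newPid.
	caps, err := capability.NewPid(0)
	if err != nil {
		log.Fatal(err)
	}

	// Get reports one bit of one set.
	fmt.Println("effective CAP_SYS_ADMIN:",
		caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN))

	// String renders every set, e.g. { effective="empty" permitted="full" ... }.
	fmt.Println(caps)
}
```
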
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_linux.go b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_linux.go
new file mode 100644
index 0000000..3dfcd39
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_linux.go
@@ -0,0 +1,608 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package capability
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "syscall"
+)
+
+var errUnknownVers = errors.New("unknown capability version")
+
+const (
+ linuxCapVer1 = 0x19980330
+ linuxCapVer2 = 0x20071026
+ linuxCapVer3 = 0x20080522
+)
+
+var (
+ capVers uint32
+ capLastCap Cap
+)
+
+func init() {
+ var hdr capHeader
+ capget(&hdr, nil)
+ capVers = hdr.version
+
+ if initLastCap() == nil {
+ CAP_LAST_CAP = capLastCap
+ if capLastCap > 31 {
+ capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1
+ } else {
+ capUpperMask = 0
+ }
+ }
+}
+
+func initLastCap() error {
+ if capLastCap != 0 {
+ return nil
+ }
+
+ f, err := os.Open("/proc/sys/kernel/cap_last_cap")
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var b []byte = make([]byte, 11)
+ _, err = f.Read(b)
+ if err != nil {
+ return err
+ }
+
+ fmt.Sscanf(string(b), "%d", &capLastCap)
+
+ return nil
+}
+
+func mkStringCap(c Capabilities, which CapType) (ret string) {
+ for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ {
+ if !c.Get(which, i) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ ret += ", "
+ }
+ ret += i.String()
+ }
+ return
+}
+
+func mkString(c Capabilities, max CapType) (ret string) {
+ ret = "{"
+ for i := CapType(1); i <= max; i <<= 1 {
+ ret += " " + i.String() + "=\""
+ if c.Empty(i) {
+ ret += "empty"
+ } else if c.Full(i) {
+ ret += "full"
+ } else {
+ ret += c.StringCap(i)
+ }
+ ret += "\""
+ }
+ ret += " }"
+ return
+}
+
+func newPid(pid int) (c Capabilities, err error) {
+ switch capVers {
+ case linuxCapVer1:
+ p := new(capsV1)
+ p.hdr.version = capVers
+ p.hdr.pid = pid
+ c = p
+ case linuxCapVer2, linuxCapVer3:
+ p := new(capsV3)
+ p.hdr.version = capVers
+ p.hdr.pid = pid
+ c = p
+ default:
+ err = errUnknownVers
+ return
+ }
+ err = c.Load()
+ if err != nil {
+ c = nil
+ }
+ return
+}
+
+type capsV1 struct {
+ hdr capHeader
+ data capData
+}
+
+func (c *capsV1) Get(which CapType, what Cap) bool {
+ if what > 32 {
+ return false
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data.effective != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data.permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data.inheritable != 0
+ }
+
+ return false
+}
+
+func (c *capsV1) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ if what > 32 {
+ continue
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data.permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data.inheritable |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsV1) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ if what > 32 {
+ continue
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data.permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data.inheritable &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsV1) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective = 0x7fffffff
+ c.data.permitted = 0x7fffffff
+ c.data.inheritable = 0
+ }
+}
+
+func (c *capsV1) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective = 0
+ c.data.permitted = 0
+ c.data.inheritable = 0
+ }
+}
+
+func (c *capsV1) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsV1) String() (ret string) {
+ return mkString(c, BOUNDING)
+}
+
+func (c *capsV1) Load() (err error) {
+ return capget(&c.hdr, &c.data)
+}
+
+func (c *capsV1) Apply(kind CapType) error {
+ if kind&CAPS == CAPS {
+ return capset(&c.hdr, &c.data)
+ }
+ return nil
+}
+
+type capsV3 struct {
+ hdr capHeader
+ data [2]capData
+ bounds [2]uint32
+}
+
+func (c *capsV3) Get(which CapType, what Cap) bool {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data[i].effective != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data[i].permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data[i].inheritable != 0
+ case BOUNDING:
+ return (1<<uint(what))&c.bounds[i] != 0
+ }
+
+ return false
+}
+
+func (c *capsV3) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data[i].effective |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data[i].permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data[i].inheritable |= 1 << uint(what)
+ }
+ if which&BOUNDING != 0 {
+ c.bounds[i] |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsV3) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data[i].effective &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data[i].permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data[i].inheritable &= ^(1 << uint(what))
+ }
+ if which&BOUNDING != 0 {
+ c.bounds[i] &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsV3) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data[0].effective = 0xffffffff
+ c.data[0].permitted = 0xffffffff
+ c.data[0].inheritable = 0
+ c.data[1].effective = 0xffffffff
+ c.data[1].permitted = 0xffffffff
+ c.data[1].inheritable = 0
+ }
+
+ if kind&BOUNDS == BOUNDS {
+ c.bounds[0] = 0xffffffff
+ c.bounds[1] = 0xffffffff
+ }
+}
+
+func (c *capsV3) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data[0].effective = 0
+ c.data[0].permitted = 0
+ c.data[0].inheritable = 0
+ c.data[1].effective = 0
+ c.data[1].permitted = 0
+ c.data[1].inheritable = 0
+ }
+
+ if kind&BOUNDS == BOUNDS {
+ c.bounds[0] = 0
+ c.bounds[1] = 0
+ }
+}
+
+func (c *capsV3) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsV3) String() (ret string) {
+ return mkString(c, BOUNDING)
+}
+
+func (c *capsV3) Load() (err error) {
+ err = capget(&c.hdr, &c.data[0])
+ if err != nil {
+ return
+ }
+
+ var status_path string
+
+ if c.hdr.pid == 0 {
+ status_path = "/proc/self/status"
+ } else {
+ status_path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
+ }
+
+ f, err := os.Open(status_path)
+ if err != nil {
+ return
+ }
+ b := bufio.NewReader(f)
+ for {
+ line, e := b.ReadString('\n')
+ if e != nil {
+ if e != io.EOF {
+ err = e
+ }
+ break
+ }
+ if strings.HasPrefix(line, "CapB") {
+ fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
+ break
+ }
+ }
+ f.Close()
+
+ return
+}
+
+func (c *capsV3) Apply(kind CapType) (err error) {
+ if kind&BOUNDS == BOUNDS {
+ var data [2]capData
+ err = capget(&c.hdr, &data[0])
+ if err != nil {
+ return
+ }
+ if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
+ for i := Cap(0); i <= CAP_LAST_CAP; i++ {
+ if c.Get(BOUNDING, i) {
+ continue
+ }
+ err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
+ if err != nil {
+ // Ignore EINVAL since the capability may not be supported in this system.
+ if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
+ err = nil
+ continue
+ }
+ return
+ }
+ }
+ }
+ }
+
+ if kind&CAPS == CAPS {
+ return capset(&c.hdr, &c.data[0])
+ }
+
+ return
+}
+
+func newFile(path string) (c Capabilities, err error) {
+ cap := new(capsFile)
+ cap.path = path
+ c = cap
+ err = c.Load()
+ if err != nil {
+ c = nil
+ }
+ return
+}
+
+type capsFile struct {
+ path string
+ data vfscapData
+}
+
+func (c *capsFile) Get(which CapType, what Cap) bool {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ return false
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ switch which {
+ case EFFECTIVE:
+ return (1<<uint(what))&c.data.effective[i] != 0
+ case PERMITTED:
+ return (1<<uint(what))&c.data.data[i].permitted != 0
+ case INHERITABLE:
+ return (1<<uint(what))&c.data.data[i].inheritable != 0
+ }
+
+ return false
+}
+
+func (c *capsFile) Set(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ continue
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective[i] |= 1 << uint(what)
+ }
+ if which&PERMITTED != 0 {
+ c.data.data[i].permitted |= 1 << uint(what)
+ }
+ if which&INHERITABLE != 0 {
+ c.data.data[i].inheritable |= 1 << uint(what)
+ }
+ }
+}
+
+func (c *capsFile) Unset(which CapType, caps ...Cap) {
+ for _, what := range caps {
+ var i uint
+ if what > 31 {
+ if c.data.version == 1 {
+ continue
+ }
+ i = uint(what) >> 5
+ what %= 32
+ }
+
+ if which&EFFECTIVE != 0 {
+ c.data.effective[i] &= ^(1 << uint(what))
+ }
+ if which&PERMITTED != 0 {
+ c.data.data[i].permitted &= ^(1 << uint(what))
+ }
+ if which&INHERITABLE != 0 {
+ c.data.data[i].inheritable &= ^(1 << uint(what))
+ }
+ }
+}
+
+func (c *capsFile) Fill(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective[0] = 0xffffffff
+ c.data.data[0].permitted = 0xffffffff
+ c.data.data[0].inheritable = 0
+ if c.data.version == 2 {
+ c.data.effective[1] = 0xffffffff
+ c.data.data[1].permitted = 0xffffffff
+ c.data.data[1].inheritable = 0
+ }
+ }
+}
+
+func (c *capsFile) Clear(kind CapType) {
+ if kind&CAPS == CAPS {
+ c.data.effective[0] = 0
+ c.data.data[0].permitted = 0
+ c.data.data[0].inheritable = 0
+ if c.data.version == 2 {
+ c.data.effective[1] = 0
+ c.data.data[1].permitted = 0
+ c.data.data[1].inheritable = 0
+ }
+ }
+}
+
+func (c *capsFile) StringCap(which CapType) (ret string) {
+ return mkStringCap(c, which)
+}
+
+func (c *capsFile) String() (ret string) {
+ return mkString(c, INHERITABLE)
+}
+
+func (c *capsFile) Load() (err error) {
+ return getVfsCap(c.path, &c.data)
+}
+
+func (c *capsFile) Apply(kind CapType) (err error) {
+ if kind&CAPS == CAPS {
+ return setVfsCap(c.path, &c.data)
+ }
+ return
+}
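The initLastCap probe above is what keeps this file portable across kernels with different capability counts: the highest supported capability number is read at runtime rather than hard-coded. A standalone sketch of the same probe, assuming procfs is mounted:

```go
package main

import (
	"fmt"
	"io/ioutil"
)

func main() {
	// The kernel publishes the highest capability number it supports;
	// initLastCap reads this same file once at package init time.
	b, err := ioutil.ReadFile("/proc/sys/kernel/cap_last_cap")
	if err != nil {
		panic(err)
	}
	var last int
	fmt.Sscanf(string(b), "%d", &last)
	fmt.Println("highest capability supported by this kernel:", last)
}
```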
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_noop.go b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_noop.go
new file mode 100644
index 0000000..9bb3070
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_noop.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !linux
+
+package capability
+
+import "errors"
+
+func newPid(pid int) (Capabilities, error) {
+ return nil, errors.New("not supported")
+}
+
+func newFile(path string) (Capabilities, error) {
+ return nil, errors.New("not supported")
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum.go b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum.go
new file mode 100644
index 0000000..fd0ce7f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package capability
+
+type CapType uint
+
+func (c CapType) String() string {
+ switch c {
+ case EFFECTIVE:
+ return "effective"
+ case PERMITTED:
+ return "permitted"
+ case INHERITABLE:
+ return "inheritable"
+ case BOUNDING:
+ return "bounding"
+ case CAPS:
+ return "caps"
+ }
+ return "unknown"
+}
+
+const (
+ EFFECTIVE CapType = 1 << iota
+ PERMITTED
+ INHERITABLE
+ BOUNDING
+
+ CAPS = EFFECTIVE | PERMITTED | INHERITABLE
+ BOUNDS = BOUNDING
+)
+
+//go:generate go run enumgen/gen.go
+type Cap int
+
+// POSIX-draft defined capabilities.
+const (
+ // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
+ // overrides the restriction of changing file ownership and group
+ // ownership.
+ CAP_CHOWN = Cap(0)
+
+ // Override all DAC access, including ACL execute access if
+ // [_POSIX_ACL] is defined. Excluding DAC access covered by
+ // CAP_LINUX_IMMUTABLE.
+ CAP_DAC_OVERRIDE = Cap(1)
+
+ // Overrides all DAC restrictions regarding read and search on files
+ // and directories, including ACL restrictions if [_POSIX_ACL] is
+ // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE.
+ CAP_DAC_READ_SEARCH = Cap(2)
+
+ // Overrides all restrictions about allowed operations on files, where
+ // file owner ID must be equal to the user ID, except where CAP_FSETID
+ // is applicable. It doesn't override MAC and DAC restrictions.
+ CAP_FOWNER = Cap(3)
+
+ // Overrides the following restrictions that the effective user ID
+ // shall match the file owner ID when setting the S_ISUID and S_ISGID
+ // bits on that file; that the effective group ID (or one of the
+ // supplementary group IDs) shall match the file owner ID when setting
+ // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are
+ // cleared on successful return from chown(2) (not implemented).
+ CAP_FSETID = Cap(4)
+
+ // Overrides the restriction that the real or effective user ID of a
+ // process sending a signal must match the real or effective user ID
+ // of the process receiving the signal.
+ CAP_KILL = Cap(5)
+
+ // Allows setgid(2) manipulation
+ // Allows setgroups(2)
+ // Allows forged gids on socket credentials passing.
+ CAP_SETGID = Cap(6)
+
+ // Allows set*uid(2) manipulation (including fsuid).
+ // Allows forged pids on socket credentials passing.
+ CAP_SETUID = Cap(7)
+
+ // Linux-specific capabilities
+
+ // Without VFS support for capabilities:
+ // Transfer any capability in your permitted set to any pid,
+ // remove any capability in your permitted set from any pid
+ // With VFS support for capabilities (neither of above, but)
+ // Add any capability from current's capability bounding set
+ // to the current process' inheritable set
+ // Allow taking bits out of capability bounding set
+ // Allow modification of the securebits for a process
+ CAP_SETPCAP = Cap(8)
+
+ // Allow modification of S_IMMUTABLE and S_APPEND file attributes
+ CAP_LINUX_IMMUTABLE = Cap(9)
+
+ // Allows binding to TCP/UDP sockets below 1024
+ // Allows binding to ATM VCIs below 32
+ CAP_NET_BIND_SERVICE = Cap(10)
+
+ // Allow broadcasting, listen to multicast
+ CAP_NET_BROADCAST = Cap(11)
+
+ // Allow interface configuration
+ // Allow administration of IP firewall, masquerading and accounting
+ // Allow setting debug option on sockets
+ // Allow modification of routing tables
+ // Allow setting arbitrary process / process group ownership on
+ // sockets
+ // Allow binding to any address for transparent proxying (also via NET_RAW)
+ // Allow setting TOS (type of service)
+ // Allow setting promiscuous mode
+ // Allow clearing driver statistics
+ // Allow multicasting
+ // Allow read/write of device-specific registers
+ // Allow activation of ATM control sockets
+ CAP_NET_ADMIN = Cap(12)
+
+ // Allow use of RAW sockets
+ // Allow use of PACKET sockets
+ // Allow binding to any address for transparent proxying (also via NET_ADMIN)
+ CAP_NET_RAW = Cap(13)
+
+ // Allow locking of shared memory segments
+ // Allow mlock and mlockall (which doesn't really have anything to do
+ // with IPC)
+ CAP_IPC_LOCK = Cap(14)
+
+ // Override IPC ownership checks
+ CAP_IPC_OWNER = Cap(15)
+
+ // Insert and remove kernel modules - modify kernel without limit
+ CAP_SYS_MODULE = Cap(16)
+
+ // Allow ioperm/iopl access
+ // Allow sending USB messages to any device via /proc/bus/usb
+ CAP_SYS_RAWIO = Cap(17)
+
+ // Allow use of chroot()
+ CAP_SYS_CHROOT = Cap(18)
+
+ // Allow ptrace() of any process
+ CAP_SYS_PTRACE = Cap(19)
+
+ // Allow configuration of process accounting
+ CAP_SYS_PACCT = Cap(20)
+
+ // Allow configuration of the secure attention key
+ // Allow administration of the random device
+ // Allow examination and configuration of disk quotas
+ // Allow setting the domainname
+ // Allow setting the hostname
+ // Allow calling bdflush()
+ // Allow mount() and umount(), setting up new smb connection
+ // Allow some autofs root ioctls
+ // Allow nfsservctl
+ // Allow VM86_REQUEST_IRQ
+ // Allow to read/write pci config on alpha
+ // Allow irix_prctl on mips (setstacksize)
+ // Allow flushing all cache on m68k (sys_cacheflush)
+ // Allow removing semaphores
+ // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
+ // and shared memory
+ // Allow locking/unlocking of shared memory segment
+ // Allow turning swap on/off
+ // Allow forged pids on socket credentials passing
+ // Allow setting readahead and flushing buffers on block devices
+ // Allow setting geometry in floppy driver
+ // Allow turning DMA on/off in xd driver
+ // Allow administration of md devices (mostly the above, but some
+ // extra ioctls)
+ // Allow tuning the ide driver
+ // Allow access to the nvram device
+ // Allow administration of apm_bios, serial and bttv (TV) device
+ // Allow manufacturer commands in isdn CAPI support driver
+ // Allow reading non-standardized portions of pci configuration space
+ // Allow DDI debug ioctl on sbpcd driver
+ // Allow setting up serial ports
+ // Allow sending raw qic-117 commands
+ // Allow enabling/disabling tagged queuing on SCSI controllers and sending
+ // arbitrary SCSI commands
+ // Allow setting encryption key on loopback filesystem
+ // Allow setting zone reclaim policy
+ CAP_SYS_ADMIN = Cap(21)
+
+ // Allow use of reboot()
+ CAP_SYS_BOOT = Cap(22)
+
+ // Allow raising priority and setting priority on other (different
+ // UID) processes
+ // Allow use of FIFO and round-robin (realtime) scheduling on own
+ // processes and setting the scheduling algorithm used by another
+ // process.
+ // Allow setting cpu affinity on other processes
+ CAP_SYS_NICE = Cap(23)
+
+ // Override resource limits. Set resource limits.
+ // Override quota limits.
+ // Override reserved space on ext2 filesystem
+ // Modify data journaling mode on ext3 filesystem (uses journaling
+ // resources)
+ // NOTE: ext2 honors fsuid when checking for resource overrides, so
+ // you can override using fsuid too
+ // Override size restrictions on IPC message queues
+ // Allow more than 64hz interrupts from the real-time clock
+ // Override max number of consoles on console allocation
+ // Override max number of keymaps
+ CAP_SYS_RESOURCE = Cap(24)
+
+ // Allow manipulation of system clock
+ // Allow irix_stime on mips
+ // Allow setting the real-time clock
+ CAP_SYS_TIME = Cap(25)
+
+ // Allow configuration of tty devices
+ // Allow vhangup() of tty
+ CAP_SYS_TTY_CONFIG = Cap(26)
+
+ // Allow the privileged aspects of mknod()
+ CAP_MKNOD = Cap(27)
+
+ // Allow taking of leases on files
+ CAP_LEASE = Cap(28)
+
+ CAP_AUDIT_WRITE = Cap(29)
+ CAP_AUDIT_CONTROL = Cap(30)
+ CAP_SETFCAP = Cap(31)
+
+ // Override MAC access.
+ // The base kernel enforces no MAC policy.
+ // An LSM may enforce a MAC policy, and if it does and it chooses
+ // to implement capability based overrides of that policy, this is
+ // the capability it should use to do so.
+ CAP_MAC_OVERRIDE = Cap(32)
+
+ // Allow MAC configuration or state changes.
+ // The base kernel requires no MAC configuration.
+ // An LSM may enforce a MAC policy, and if it does and it chooses
+ // to implement capability based checks on modifications to that
+ // policy or the data required to maintain it, this is the
+ // capability it should use to do so.
+ CAP_MAC_ADMIN = Cap(33)
+
+ // Allow configuring the kernel's syslog (printk behaviour)
+ CAP_SYSLOG = Cap(34)
+
+ // Allow triggering something that will wake the system
+ CAP_WAKE_ALARM = Cap(35)
+
+ // Allow preventing system suspends
+ CAP_BLOCK_SUSPEND = Cap(36)
+
+ // Allow reading audit messages from the kernel
+ CAP_AUDIT_READ = Cap(37)
+)
+
+var (
+ // Highest valid capability of the running kernel.
+ CAP_LAST_CAP = Cap(63)
+
+ capUpperMask = ^uint32(0)
+)
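Each CapType constant is a single bit, which is why CAPS and BOUNDS can be plain ORs of the constants above and why Set/Unset accept OR'ed combinations. A small illustrative check of the flag arithmetic:

```go
package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// Target two of the three process sets at once.
	which := capability.EFFECTIVE | capability.PERMITTED
	fmt.Println(which&capability.EFFECTIVE != 0) // true
	fmt.Println(which&capability.BOUNDING != 0)  // false
	// CAPS is exactly the three process sets OR'ed together.
	fmt.Println(capability.CAPS ==
		capability.EFFECTIVE|capability.PERMITTED|capability.INHERITABLE) // true
}
```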
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum_gen.go b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum_gen.go
new file mode 100644
index 0000000..b9e6d2d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum_gen.go
@@ -0,0 +1,129 @@
+// generated file; DO NOT EDIT - use go generate in directory with source
+
+package capability
+
+func (c Cap) String() string {
+ switch c {
+ case CAP_CHOWN:
+ return "chown"
+ case CAP_DAC_OVERRIDE:
+ return "dac_override"
+ case CAP_DAC_READ_SEARCH:
+ return "dac_read_search"
+ case CAP_FOWNER:
+ return "fowner"
+ case CAP_FSETID:
+ return "fsetid"
+ case CAP_KILL:
+ return "kill"
+ case CAP_SETGID:
+ return "setgid"
+ case CAP_SETUID:
+ return "setuid"
+ case CAP_SETPCAP:
+ return "setpcap"
+ case CAP_LINUX_IMMUTABLE:
+ return "linux_immutable"
+ case CAP_NET_BIND_SERVICE:
+ return "net_bind_service"
+ case CAP_NET_BROADCAST:
+ return "net_broadcast"
+ case CAP_NET_ADMIN:
+ return "net_admin"
+ case CAP_NET_RAW:
+ return "net_raw"
+ case CAP_IPC_LOCK:
+ return "ipc_lock"
+ case CAP_IPC_OWNER:
+ return "ipc_owner"
+ case CAP_SYS_MODULE:
+ return "sys_module"
+ case CAP_SYS_RAWIO:
+ return "sys_rawio"
+ case CAP_SYS_CHROOT:
+ return "sys_chroot"
+ case CAP_SYS_PTRACE:
+ return "sys_ptrace"
+ case CAP_SYS_PACCT:
+ return "sys_pacct"
+ case CAP_SYS_ADMIN:
+ return "sys_admin"
+ case CAP_SYS_BOOT:
+ return "sys_boot"
+ case CAP_SYS_NICE:
+ return "sys_nice"
+ case CAP_SYS_RESOURCE:
+ return "sys_resource"
+ case CAP_SYS_TIME:
+ return "sys_time"
+ case CAP_SYS_TTY_CONFIG:
+ return "sys_tty_config"
+ case CAP_MKNOD:
+ return "mknod"
+ case CAP_LEASE:
+ return "lease"
+ case CAP_AUDIT_WRITE:
+ return "audit_write"
+ case CAP_AUDIT_CONTROL:
+ return "audit_control"
+ case CAP_SETFCAP:
+ return "setfcap"
+ case CAP_MAC_OVERRIDE:
+ return "mac_override"
+ case CAP_MAC_ADMIN:
+ return "mac_admin"
+ case CAP_SYSLOG:
+ return "syslog"
+ case CAP_WAKE_ALARM:
+ return "wake_alarm"
+ case CAP_BLOCK_SUSPEND:
+ return "block_suspend"
+ case CAP_AUDIT_READ:
+ return "audit_read"
+ }
+ return "unknown"
+}
+
+// List returns list of all supported capabilities
+func List() []Cap {
+ return []Cap{
+ CAP_CHOWN,
+ CAP_DAC_OVERRIDE,
+ CAP_DAC_READ_SEARCH,
+ CAP_FOWNER,
+ CAP_FSETID,
+ CAP_KILL,
+ CAP_SETGID,
+ CAP_SETUID,
+ CAP_SETPCAP,
+ CAP_LINUX_IMMUTABLE,
+ CAP_NET_BIND_SERVICE,
+ CAP_NET_BROADCAST,
+ CAP_NET_ADMIN,
+ CAP_NET_RAW,
+ CAP_IPC_LOCK,
+ CAP_IPC_OWNER,
+ CAP_SYS_MODULE,
+ CAP_SYS_RAWIO,
+ CAP_SYS_CHROOT,
+ CAP_SYS_PTRACE,
+ CAP_SYS_PACCT,
+ CAP_SYS_ADMIN,
+ CAP_SYS_BOOT,
+ CAP_SYS_NICE,
+ CAP_SYS_RESOURCE,
+ CAP_SYS_TIME,
+ CAP_SYS_TTY_CONFIG,
+ CAP_MKNOD,
+ CAP_LEASE,
+ CAP_AUDIT_WRITE,
+ CAP_AUDIT_CONTROL,
+ CAP_SETFCAP,
+ CAP_MAC_OVERRIDE,
+ CAP_MAC_ADMIN,
+ CAP_SYSLOG,
+ CAP_WAKE_ALARM,
+ CAP_BLOCK_SUSPEND,
+ CAP_AUDIT_READ,
+ }
+}
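List and the generated String method combine to enumerate capability names; a sketch that prints only the ones the running kernel knows about (CAP_LAST_CAP is lowered at init time on Linux):

```go
package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	for _, c := range capability.List() {
		// Skip names newer than the running kernel.
		if c > capability.CAP_LAST_CAP {
			continue
		}
		fmt.Println(c.String())
	}
}
```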
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go
new file mode 100644
index 0000000..4c73380
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+)
+
+const fileName = "enum.go"
+const genName = "enum_gen.go"
+
+type generator struct {
+ buf bytes.Buffer
+ caps []string
+}
+
+func (g *generator) writeHeader() {
+ g.buf.WriteString("// generated file; DO NOT EDIT - use go generate in directory with source\n")
+ g.buf.WriteString("\n")
+ g.buf.WriteString("package capability")
+}
+
+func (g *generator) writeStringFunc() {
+ g.buf.WriteString("\n")
+ g.buf.WriteString("func (c Cap) String() string {\n")
+ g.buf.WriteString("switch c {\n")
+ for _, cap := range g.caps {
+ fmt.Fprintf(&g.buf, "case %s:\n", cap)
+ fmt.Fprintf(&g.buf, "return \"%s\"\n", strings.ToLower(cap[4:]))
+ }
+ g.buf.WriteString("}\n")
+ g.buf.WriteString("return \"unknown\"\n")
+ g.buf.WriteString("}\n")
+}
+
+func (g *generator) writeListFunc() {
+ g.buf.WriteString("\n")
+ g.buf.WriteString("// List returns list of all supported capabilities\n")
+ g.buf.WriteString("func List() []Cap {\n")
+ g.buf.WriteString("return []Cap{\n")
+ for _, cap := range g.caps {
+ fmt.Fprintf(&g.buf, "%s,\n", cap)
+ }
+ g.buf.WriteString("}\n")
+ g.buf.WriteString("}\n")
+}
+
+func main() {
+ fs := token.NewFileSet()
+ parsedFile, err := parser.ParseFile(fs, fileName, nil, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ var caps []string
+ for _, decl := range parsedFile.Decls {
+ decl, ok := decl.(*ast.GenDecl)
+ if !ok || decl.Tok != token.CONST {
+ continue
+ }
+ for _, spec := range decl.Specs {
+ vspec := spec.(*ast.ValueSpec)
+ name := vspec.Names[0].Name
+ if strings.HasPrefix(name, "CAP_") {
+ caps = append(caps, name)
+ }
+ }
+ }
+ g := &generator{caps: caps}
+ g.writeHeader()
+ g.writeStringFunc()
+ g.writeListFunc()
+ src, err := format.Source(g.buf.Bytes())
+ if err != nil {
+ fmt.Println("generated invalid Go code")
+ fmt.Println(g.buf.String())
+ log.Fatal(err)
+ }
+ fi, err := os.Stat(fileName)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := ioutil.WriteFile(genName, src, fi.Mode().Perm()); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/syscall_linux.go b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/syscall_linux.go
new file mode 100644
index 0000000..dd6f454
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/syscall_linux.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package capability
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type capHeader struct {
+ version uint32
+ pid int
+}
+
+type capData struct {
+ effective uint32
+ permitted uint32
+ inheritable uint32
+}
+
+func capget(hdr *capHeader, data *capData) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func capset(hdr *capHeader, data *capData) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
+ _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+const (
+ vfsXattrName = "security.capability"
+
+ vfsCapVerMask = 0xff000000
+ vfsCapVer1 = 0x01000000
+ vfsCapVer2 = 0x02000000
+
+ vfsCapFlagMask = ^vfsCapVerMask
+ vfsCapFlageffective = 0x000001
+
+ vfscapDataSizeV1 = 4 * (1 + 2*1)
+ vfscapDataSizeV2 = 4 * (1 + 2*2)
+)
+
+type vfscapData struct {
+ magic uint32
+ data [2]struct {
+ permitted uint32
+ inheritable uint32
+ }
+ effective [2]uint32
+ version int8
+}
+
+var (
+ _vfsXattrName *byte
+)
+
+func init() {
+ _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName)
+}
+
+func getVfsCap(path string, dest *vfscapData) (err error) {
+ var _p0 *byte
+ _p0, err = syscall.BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
+ if e1 != 0 {
+ if e1 == syscall.ENODATA {
+ dest.version = 2
+ return
+ }
+ err = e1
+ }
+ switch dest.magic & vfsCapVerMask {
+ case vfsCapVer1:
+ dest.version = 1
+ if r0 != vfscapDataSizeV1 {
+ return syscall.EINVAL
+ }
+ dest.data[1].permitted = 0
+ dest.data[1].inheritable = 0
+ case vfsCapVer2:
+ dest.version = 2
+ if r0 != vfscapDataSizeV2 {
+ return syscall.EINVAL
+ }
+ default:
+ return syscall.EINVAL
+ }
+ if dest.magic&vfsCapFlageffective != 0 {
+ dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable
+ dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable
+ } else {
+ dest.effective[0] = 0
+ dest.effective[1] = 0
+ }
+ return
+}
+
+func setVfsCap(path string, data *vfscapData) (err error) {
+ var _p0 *byte
+ _p0, err = syscall.BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var size uintptr
+ if data.version == 1 {
+ data.magic = vfsCapVer1
+ size = vfscapDataSizeV1
+ } else if data.version == 2 {
+ data.magic = vfsCapVer2
+ if data.effective[0] != 0 || data.effective[1] != 0 {
+ data.magic |= vfsCapFlageffective
+ }
+ size = vfscapDataSizeV2
+ } else {
+ return syscall.EINVAL
+ }
+ _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
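getVfsCap and setVfsCap read and write the security.capability extended attribute that the kernel consults on exec. They are unexported; the public route to the same data is NewFile. A hedged sketch (the path is only a common example of a binary carrying file capabilities):

```go
package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// On many distributions ping carries cap_net_raw instead of
	// the setuid bit; adjust the path for your system.
	caps, err := capability.NewFile("/bin/ping")
	if err != nil {
		panic(err)
	}
	fmt.Println(caps)
}
```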
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/.travis.yml b/Godeps/_workspace/src/github.com/vishvananda/netlink/.travis.yml
new file mode 100644
index 0000000..1970069
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+install:
+ - go get github.com/vishvananda/netns
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE b/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE
new file mode 100644
index 0000000..9f64db8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Vishvananda Ishaya.
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/Makefile b/Godeps/_workspace/src/github.com/vishvananda/netlink/Makefile
new file mode 100644
index 0000000..b325018
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/Makefile
@@ -0,0 +1,29 @@
+DIRS := \
+ . \
+ nl
+
+DEPS = \
+ github.com/vishvananda/netns
+
+uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1)))
+testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go))))
+goroot = $(addprefix ../../../,$(1))
+unroot = $(subst ../../../,,$(1))
+fmt = $(addprefix fmt-,$(1))
+
+all: fmt
+
+$(call goroot,$(DEPS)):
+ go get $(call unroot,$@)
+
+.PHONY: $(call testdirs,$(DIRS))
+$(call testdirs,$(DIRS)):
+ sudo -E go test -v github.com/vishvananda/netlink/$@
+
+$(call fmt,$(call testdirs,$(DIRS))):
+ ! gofmt -l $(subst fmt-,,$@)/*.go | grep ''
+
+.PHONY: fmt
+fmt: $(call fmt,$(call testdirs,$(DIRS)))
+
+test: fmt $(call goroot,$(DEPS)) $(call testdirs,$(DIRS))
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/README.md b/Godeps/_workspace/src/github.com/vishvananda/netlink/README.md
new file mode 100644
index 0000000..8cd50a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/README.md
@@ -0,0 +1,89 @@
+# netlink - netlink library for go #
+
+[![Build Status](https://travis-ci.org/vishvananda/netlink.png?branch=master)](https://travis-ci.org/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink)
+
+The netlink package provides a simple netlink library for go. Netlink
+is the interface a user-space program in linux uses to communicate with
+the kernel. It can be used to add and remove interfaces, set ip addresses
+and routes, and configure ipsec. Netlink communication requires elevated
+privileges, so in most cases this code needs to be run as root. Since
+low-level netlink messages are inscrutable at best, the library attempts
+to provide an api that is loosely modeled on the CLI provied by iproute2.
+Actions like `ip link add` will be accomplished via a similarly named
+function like AddLink(). This library began its life as a fork of the
+netlink functionality in
+[docker/libcontainer](https://github.com/docker/libcontainer) but was
+heavily rewritten to improve testability, performance, and to add new
+functionality like ipsec xfrm handling.
+
+## Local Build and Test ##
+
+You can use the go get command:
+
+ go get github.com/vishvananda/netlink
+
+Testing dependencies:
+
+ go get github.com/vishvananda/netns
+
+Testing (requires root):
+
+ sudo -E go test github.com/vishvananda/netlink
+
+## Examples ##
+
+Add a new bridge and add eth1 into it:
+
+```go
+package main
+
+import (
+ "net"
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ la := netlink.NewLinkAttrs()
+ la.Name = "foo"
+ mybridge := &netlink.Bridge{LinkAttrs: la}
+ if err := netlink.LinkAdd(mybridge); err != nil {
+ fmt.Printf("could not add %s: %v\n", la.Name, err)
+ }
+ eth1, _ := netlink.LinkByName("eth1")
+ netlink.LinkSetMaster(eth1, mybridge)
+}
+
+```
+Note the `NewLinkAttrs` constructor: it fills the structure with default
+values. For now it only sets `TxQLen` to `-1`, so the kernel will pick its
+own default. With plain initialization (`LinkAttrs{Name: "foo"}`), `TxQLen`
+is set to `0` unless you specify it, as in `LinkAttrs{Name: "foo", TxQLen: 1000}`.
+
+Add a new ip address to loopback:
+
+```go
+package main
+
+import (
+ "net"
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ lo, _ := netlink.LinkByName("lo")
+ addr, _ := netlink.ParseAddr("169.254.169.254/32")
+ netlink.AddrAdd(lo, addr)
+}
+
+```
+
+## Future Work ##
+
+Many pieces of netlink are not yet fully supported in the high-level
+interface, and aspects of virtually all of the high-level objects are still
+missing. Many of the underlying primitives are there, so it's a matter of
+putting the right fields into the high-level objects and making sure that
+they are serialized and deserialized correctly in the Add and List methods.
+
+There are also a few pieces of low-level netlink functionality that still
+need to be implemented. Routing rules are not yet in place, nor are some of
+the more advanced link types. Hopefully there is decent structure and
+testing in place to make these fairly straightforward to add.
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/addr.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/addr.go
new file mode 100644
index 0000000..9bbaf50
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/addr.go
@@ -0,0 +1,43 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// Addr represents an IP address from netlink. Netlink ip addresses
+// include a mask, so it stores the address as a net.IPNet.
+type Addr struct {
+ *net.IPNet
+ Label string
+}
+
+// String returns $ip/$netmask $label
+func (a Addr) String() string {
+ return fmt.Sprintf("%s %s", a.IPNet, a.Label)
+}
+
+// ParseAddr parses the string representation of an address in the
+// form $ip/$netmask $label. The label portion is optional.
+func ParseAddr(s string) (*Addr, error) {
+ label := ""
+ parts := strings.Split(s, " ")
+ if len(parts) > 1 {
+ s = parts[0]
+ label = parts[1]
+ }
+ m, err := ParseIPNet(s)
+ if err != nil {
+ return nil, err
+ }
+ return &Addr{IPNet: m, Label: label}, nil
+}
+
+// Equal returns true if both Addrs have the same net.IPNet value.
+func (a Addr) Equal(x Addr) bool {
+ sizea, _ := a.Mask.Size()
+ sizeb, _ := x.Mask.Size()
+ // ignore label for comparison
+ return a.IP.Equal(x.IP) && sizea == sizeb
+}
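A quick illustrative use of ParseAddr and Equal (the address and label are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	a, err := netlink.ParseAddr("192.168.1.1/24 eth0:1")
	if err != nil {
		panic(err)
	}
	b, _ := netlink.ParseAddr("192.168.1.1/24")
	fmt.Println(a.Label)     // "eth0:1"
	fmt.Println(a.Equal(*b)) // true; the label is ignored in comparison
}
```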
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/addr_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/addr_linux.go
new file mode 100644
index 0000000..19aac0f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/addr_linux.go
@@ -0,0 +1,128 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// AddrAdd will add an IP address to a link device.
+// Equivalent to: `ip addr add $addr dev $link`
+func AddrAdd(link Link, addr *Addr) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return addrHandle(link, addr, req)
+}
+
+// AddrDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func AddrDel(link Link, addr *Addr) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK)
+ return addrHandle(link, addr, req)
+}
+
+func addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
+ base := link.Attrs()
+ if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) {
+ return fmt.Errorf("label must begin with interface name")
+ }
+ ensureIndex(base)
+
+ family := nl.GetIPFamily(addr.IP)
+
+ msg := nl.NewIfAddrmsg(family)
+ msg.Index = uint32(base.Index)
+ prefixlen, _ := addr.Mask.Size()
+ msg.Prefixlen = uint8(prefixlen)
+ req.AddData(msg)
+
+ var addrData []byte
+ if family == FAMILY_V4 {
+ addrData = addr.IP.To4()
+ } else {
+ addrData = addr.IP.To16()
+ }
+
+ localData := nl.NewRtAttr(syscall.IFA_LOCAL, addrData)
+ req.AddData(localData)
+
+ addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, addrData)
+ req.AddData(addressData)
+
+ if addr.Label != "" {
+ labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label))
+ req.AddData(labelData)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// AddrList gets a list of IP addresses in the system.
+// Equivalent to: `ip addr show`.
+// The list can be filtered by link and ip family.
+func AddrList(link Link, family int) ([]Addr, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR)
+ if err != nil {
+ return nil, err
+ }
+
+ index := 0
+ if link != nil {
+ base := link.Attrs()
+ ensureIndex(base)
+ index = base.Index
+ }
+
+ var res []Addr
+ for _, m := range msgs {
+ msg := nl.DeserializeIfAddrmsg(m)
+
+ if link != nil && msg.Index != uint32(index) {
+ // Ignore messages from other interfaces
+ continue
+ }
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ var local, dst *net.IPNet
+ var addr Addr
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.IFA_ADDRESS:
+ dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ case syscall.IFA_LOCAL:
+ local = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ case syscall.IFA_LABEL:
+ addr.Label = string(attr.Value[:len(attr.Value)-1])
+ }
+ }
+
+ // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
+ if local != nil {
+ addr.IPNet = local
+ } else {
+ addr.IPNet = dst
+ }
+
+ res = append(res, addr)
+ }
+
+ return res, nil
+}
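AddrList pairs naturally with LinkByName; a minimal sketch that dumps the IPv4 addresses of loopback (listing needs no special privileges):

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	lo, err := netlink.LinkByName("lo")
	if err != nil {
		panic(err)
	}
	// FAMILY_V4 limits the dump to IPv4; FAMILY_ALL would list both.
	addrs, err := netlink.AddrList(lo, netlink.FAMILY_V4)
	if err != nil {
		panic(err)
	}
	for _, a := range addrs {
		fmt.Println(a.String())
	}
}
```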
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/filter.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/filter.go
new file mode 100644
index 0000000..83ad700
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/filter.go
@@ -0,0 +1,55 @@
+package netlink
+
+import (
+ "fmt"
+)
+
+type Filter interface {
+ Attrs() *FilterAttrs
+ Type() string
+}
+
+// FilterAttrs holds the attributes shared by all netlink filters. A filter
+// is associated with a link, has a handle and a parent. The root filter
+// of a device should have a parent == HANDLE_ROOT.
+type FilterAttrs struct {
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Priority uint16 // lower is higher priority
+ Protocol uint16 // syscall.ETH_P_*
+}
+
+func (q FilterAttrs) String() string {
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Priority: %d, Protocol: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Priority, q.Protocol)
+}
+
+// U32 filters on many packet related properties
+type U32 struct {
+ FilterAttrs
+ // Currently only supports redirecting to another interface
+ RedirIndex int
+}
+
+func (filter *U32) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+func (filter *U32) Type() string {
+ return "u32"
+}
+
+// GenericFilter filters represent types that are not currently understood
+// by this netlink library.
+type GenericFilter struct {
+ FilterAttrs
+ FilterType string
+}
+
+func (filter *GenericFilter) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+func (filter *GenericFilter) Type() string {
+ return filter.FilterType
+}
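The U32 type above is plain data; the netlink plumbing lives in filter_linux.go. An illustrative literal for the one case it currently supports, a match-all redirect (both interface indexes are placeholders):

```go
package main

import (
	"syscall"

	"github.com/vishvananda/netlink"
)

func main() {
	filter := &netlink.U32{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: 1,                             // hypothetical source ifindex
			Parent:    netlink.MakeHandle(0xffff, 0), // ingress qdisc
			Priority:  1,
			Protocol:  syscall.ETH_P_ALL,
		},
		RedirIndex: 2, // hypothetical target ifindex
	}
	if err := netlink.FilterAdd(filter); err != nil {
		panic(err)
	}
}
```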
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/filter_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/filter_linux.go
new file mode 100644
index 0000000..1ec6987
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/filter_linux.go
@@ -0,0 +1,191 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// FilterDel will delete a filter from the system.
+// Equivalent to: `tc filter del $filter`
+func FilterDel(filter Filter) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK)
+ base := filter.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)),
+ }
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// FilterAdd will add a filter to the system.
+// Equivalent to: `tc filter add $filter`
+func FilterAdd(filter Filter) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ base := filter.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)),
+ }
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type())))
+
+ options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
+ if u32, ok := filter.(*U32); ok {
+ // match all
+ sel := nl.TcU32Sel{
+ Nkeys: 1,
+ Flags: nl.TC_U32_TERMINAL,
+ }
+ sel.Keys = append(sel.Keys, nl.TcU32Key{})
+ nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize())
+ actions := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil)
+ table := nl.NewRtAttrChild(actions, nl.TCA_ACT_TAB, nil)
+ nl.NewRtAttrChild(table, nl.TCA_KIND, nl.ZeroTerminated("mirred"))
+ // redirect to other interface
+ mir := nl.TcMirred{
+ Action: nl.TC_ACT_STOLEN,
+ Eaction: nl.TCA_EGRESS_REDIR,
+ Ifindex: uint32(u32.RedirIndex),
+ }
+ aopts := nl.NewRtAttrChild(table, nl.TCA_OPTIONS, nil)
+ nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mir.Serialize())
+ }
+ req.AddData(options)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// FilterList gets a list of filters in the system.
+// Equivalent to: `tc filter show`.
+// Generally returns nothing if link and parent are not specified.
+func FilterList(link Link, parent uint32) ([]Filter, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP)
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Parent: parent,
+ }
+ if link != nil {
+ base := link.Attrs()
+ ensureIndex(base)
+ msg.Ifindex = int32(base.Index)
+ }
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Filter
+ for _, m := range msgs {
+ msg := nl.DeserializeTcMsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ base := FilterAttrs{
+ LinkIndex: int(msg.Ifindex),
+ Handle: msg.Handle,
+ Parent: msg.Parent,
+ }
+ base.Priority, base.Protocol = MajorMinor(msg.Info)
+ base.Protocol = nl.Swap16(base.Protocol)
+
+ var filter Filter
+ filterType := ""
+ detailed := false
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.TCA_KIND:
+ filterType = string(attr.Value[:len(attr.Value)-1])
+ switch filterType {
+ case "u32":
+ filter = &U32{}
+ default:
+ filter = &GenericFilter{FilterType: filterType}
+ }
+ case nl.TCA_OPTIONS:
+ switch filterType {
+ case "u32":
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ detailed, err = parseU32Data(filter, data)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+ // only return the detailed version of the filter
+ if detailed {
+ *filter.Attrs() = base
+ res = append(res, filter)
+ }
+ }
+
+ return res, nil
+}
+
+func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+ native = nl.NativeEndian()
+ u32 := filter.(*U32)
+ detailed := false
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_U32_SEL:
+ detailed = true
+ sel := nl.DeserializeTcU32Sel(datum.Value)
+ // only parse if we have a very basic redirect
+ if sel.Flags&nl.TC_U32_TERMINAL == 0 || sel.Nkeys != 1 {
+ return detailed, nil
+ }
+ case nl.TCA_U32_ACT:
+ table, err := nl.ParseRouteAttr(datum.Value)
+ if err != nil {
+ return detailed, err
+ }
+ if len(table) != 1 || table[0].Attr.Type != nl.TCA_ACT_TAB {
+ return detailed, fmt.Errorf("Action table not formed properly")
+ }
+ aattrs, err := nl.ParseRouteAttr(table[0].Value)
+ if err != nil {
+ return detailed, err
+ }
+ for _, aattr := range aattrs {
+ switch aattr.Attr.Type {
+ case nl.TCA_KIND:
+ actionType := string(aattr.Value[:len(aattr.Value)-1])
+ // only parse if the action is mirred
+ if actionType != "mirred" {
+ return detailed, nil
+ }
+ case nl.TCA_OPTIONS:
+ adata, err := nl.ParseRouteAttr(aattr.Value)
+ if err != nil {
+ return detailed, err
+ }
+ for _, adatum := range adata {
+ switch adatum.Attr.Type {
+ case nl.TCA_MIRRED_PARMS:
+ mir := nl.DeserializeTcMirred(adatum.Value)
+ u32.RedirIndex = int(mir.Ifindex)
+ }
+ }
+ }
+ }
+ }
+ }
+ return detailed, nil
+}
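As the FilterList comment warns, a useful dump needs both a link and a parent handle; a hedged sketch querying the ingress filters of eth0:

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	eth0, err := netlink.LinkByName("eth0")
	if err != nil {
		panic(err)
	}
	// MakeHandle(0xffff, 0) is the conventional ingress parent.
	filters, err := netlink.FilterList(eth0, netlink.MakeHandle(0xffff, 0))
	if err != nil {
		panic(err)
	}
	for _, f := range filters {
		fmt.Println(f.Type(), f.Attrs())
	}
}
```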
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/link.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/link.go
new file mode 100644
index 0000000..18fd175
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/link.go
@@ -0,0 +1,223 @@
+package netlink
+
+import "net"
+
+// Link represents a link device from netlink. Shared link attributes
+// like name may be retrieved using the Attrs() method. Unique data
+// can be retrieved by casting the object to the proper type.
+type Link interface {
+ Attrs() *LinkAttrs
+ Type() string
+}
+
+type (
+ NsPid int
+ NsFd int
+)
+
+// LinkAttrs represents data shared by most link types
+type LinkAttrs struct {
+ Index int
+ MTU int
+ TxQLen int // Transmit Queue Length
+ Name string
+ HardwareAddr net.HardwareAddr
+ Flags net.Flags
+ ParentIndex int // index of the parent link device
+ MasterIndex int // must be the index of a bridge
+ Namespace interface{} // nil | NsPid | NsFd
+}
+
+// NewLinkAttrs returns LinkAttrs structure filled with default values
+func NewLinkAttrs() LinkAttrs {
+ return LinkAttrs{
+ TxQLen: -1,
+ }
+}
+
+// Device links cannot be created via netlink. These are links
+// created outside of netlink, e.g. by udev, like 'lo' and 'eth0'.
+type Device struct {
+ LinkAttrs
+}
+
+func (device *Device) Attrs() *LinkAttrs {
+ return &device.LinkAttrs
+}
+
+func (device *Device) Type() string {
+ return "device"
+}
+
+// Dummy links are dummy ethernet devices
+type Dummy struct {
+ LinkAttrs
+}
+
+func (dummy *Dummy) Attrs() *LinkAttrs {
+ return &dummy.LinkAttrs
+}
+
+func (dummy *Dummy) Type() string {
+ return "dummy"
+}
+
+// Ifb links are advanced dummy devices for packet filtering
+type Ifb struct {
+ LinkAttrs
+}
+
+func (ifb *Ifb) Attrs() *LinkAttrs {
+ return &ifb.LinkAttrs
+}
+
+func (ifb *Ifb) Type() string {
+ return "ifb"
+}
+
+// Bridge links are simple linux bridges
+type Bridge struct {
+ LinkAttrs
+}
+
+func (bridge *Bridge) Attrs() *LinkAttrs {
+ return &bridge.LinkAttrs
+}
+
+func (bridge *Bridge) Type() string {
+ return "bridge"
+}
+
+// Vlan links have ParentIndex set in their Attrs()
+type Vlan struct {
+ LinkAttrs
+ VlanId int
+}
+
+func (vlan *Vlan) Attrs() *LinkAttrs {
+ return &vlan.LinkAttrs
+}
+
+func (vlan *Vlan) Type() string {
+ return "vlan"
+}
+
+type MacvlanMode uint16
+
+const (
+ MACVLAN_MODE_DEFAULT MacvlanMode = iota
+ MACVLAN_MODE_PRIVATE
+ MACVLAN_MODE_VEPA
+ MACVLAN_MODE_BRIDGE
+ MACVLAN_MODE_PASSTHRU
+ MACVLAN_MODE_SOURCE
+)
+
+// Macvlan links have ParentIndex set in their Attrs()
+type Macvlan struct {
+ LinkAttrs
+ Mode MacvlanMode
+}
+
+func (macvlan *Macvlan) Attrs() *LinkAttrs {
+ return &macvlan.LinkAttrs
+}
+
+func (macvlan *Macvlan) Type() string {
+ return "macvlan"
+}
+
+// Macvtap is a virtual interface based on macvlan.
+type Macvtap struct {
+ Macvlan
+}
+
+func (macvtap Macvtap) Type() string {
+ return "macvtap"
+}
+
+// Veth devices must specify PeerName on create
+type Veth struct {
+ LinkAttrs
+ PeerName string // veth on create only
+}
+
+func (veth *Veth) Attrs() *LinkAttrs {
+ return &veth.LinkAttrs
+}
+
+func (veth *Veth) Type() string {
+ return "veth"
+}
+
+// GenericLink links represent types that are not currently understood
+// by this netlink library.
+type GenericLink struct {
+ LinkAttrs
+ LinkType string
+}
+
+func (generic *GenericLink) Attrs() *LinkAttrs {
+ return &generic.LinkAttrs
+}
+
+func (generic *GenericLink) Type() string {
+ return generic.LinkType
+}
+
+type Vxlan struct {
+ LinkAttrs
+ VxlanId int
+ VtepDevIndex int
+ SrcAddr net.IP
+ Group net.IP
+ TTL int
+ TOS int
+ Learning bool
+ Proxy bool
+ RSC bool
+ L2miss bool
+ L3miss bool
+ NoAge bool
+ GBP bool
+ Age int
+ Limit int
+ Port int
+ PortLow int
+ PortHigh int
+}
+
+func (vxlan *Vxlan) Attrs() *LinkAttrs {
+ return &vxlan.LinkAttrs
+}
+
+func (vxlan *Vxlan) Type() string {
+ return "vxlan"
+}
+
+type IPVlanMode uint16
+
+const (
+ IPVLAN_MODE_L2 IPVlanMode = iota
+ IPVLAN_MODE_L3
+ IPVLAN_MODE_MAX
+)
+
+type IPVlan struct {
+ LinkAttrs
+ Mode IPVlanMode
+}
+
+func (ipvlan *IPVlan) Attrs() *LinkAttrs {
+ return &ipvlan.LinkAttrs
+}
+
+func (ipvlan *IPVlan) Type() string {
+ return "ipvlan"
+}
+
+// iproute2 supported devices;
+// vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
+// bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan |
+// gre | gretap | ip6gre | ip6gretap | vti | nlmon |
+// bond_slave | ipvlan
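
A minimal consumption sketch for the Link interface above (editorial illustration, not part of the vendored file; it assumes a linux host that actually has an "eth0" device): shared fields come from Attrs(), while type-specific fields such as VlanId require a Go type assertion to the concrete struct.

    package main

    import (
        "fmt"
        "log"

        "github.com/vishvananda/netlink"
    )

    func main() {
        link, err := netlink.LinkByName("eth0") // assumed device name
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("mtu:", link.Attrs().MTU) // shared attribute
        if vlan, ok := link.(*netlink.Vlan); ok {
            fmt.Println("vlan id:", vlan.VlanId) // type-specific attribute
        }
    }
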
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/link_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/link_linux.go
new file mode 100644
index 0000000..6851150
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/link_linux.go
@@ -0,0 +1,750 @@
+package netlink
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+var native = nl.NativeEndian()
+var lookupByDump = false
+
+var macvlanModes = [...]uint32{
+ 0,
+ nl.MACVLAN_MODE_PRIVATE,
+ nl.MACVLAN_MODE_VEPA,
+ nl.MACVLAN_MODE_BRIDGE,
+ nl.MACVLAN_MODE_PASSTHRU,
+ nl.MACVLAN_MODE_SOURCE,
+}
+
+func ensureIndex(link *LinkAttrs) {
+ if link != nil && link.Index == 0 {
+ newlink, _ := LinkByName(link.Name)
+ if newlink != nil {
+ link.Index = newlink.Attrs().Index
+ }
+ }
+}
+
+// LinkSetUp enables the link device.
+// Equivalent to: `ip link set $link up`
+func LinkSetUp(link Link) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change = syscall.IFF_UP
+ msg.Flags = syscall.IFF_UP
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetDown disables link device.
+// Equivalent to: `ip link set $link down`
+func LinkSetDown(link Link) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Change = syscall.IFF_UP
+ msg.Flags = 0 & ^syscall.IFF_UP
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetMTU sets the mtu of the link device.
+// Equivalent to: `ip link set $link mtu $mtu`
+func LinkSetMTU(link Link, mtu int) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(mtu))
+
+ data := nl.NewRtAttr(syscall.IFLA_MTU, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetName sets the name of the link device.
+// Equivalent to: `ip link set $link name $name`
+func LinkSetName(link Link, name string) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name))
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetHardwareAddr sets the hardware address of the link device.
+// Equivalent to: `ip link set $link address $hwaddr`
+func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr))
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetMaster sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMaster(link Link, master *Bridge) error {
+ index := 0
+ if master != nil {
+ masterBase := master.Attrs()
+ ensureIndex(masterBase)
+ index = masterBase.Index
+ }
+ return LinkSetMasterByIndex(link, index)
+}
+
+// LinkSetMasterByIndex sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMasterByIndex(link Link, masterIndex int) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(masterIndex))
+
+ data := nl.NewRtAttr(syscall.IFLA_MASTER, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetNsPid puts the device into a new network namespace. The
+// pid must be a pid of a running process.
+// Equivalent to: `ip link set $link netns $pid`
+func LinkSetNsPid(link Link, nspid int) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(nspid))
+
+ data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetNsFd puts the device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `ip link set $link netns $ns`
+func LinkSetNsFd(link Link, fd int) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(fd))
+
+ data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func boolAttr(val bool) []byte {
+ var v uint8
+ if val {
+ v = 1
+ }
+ return nl.Uint8Attr(v)
+}
+
+type vxlanPortRange struct {
+ Lo, Hi uint16
+}
+
+func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
+ if vxlan.VtepDevIndex != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
+ }
+ if vxlan.SrcAddr != nil {
+ ip := vxlan.SrcAddr.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL, []byte(ip))
+ } else {
+ ip = vxlan.SrcAddr.To16()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL6, []byte(ip))
+ }
+ }
+ }
+ if vxlan.Group != nil {
+ group := vxlan.Group.To4()
+ if group != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP, []byte(group))
+ } else {
+ group = vxlan.Group.To16()
+ if group != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP6, []byte(group))
+ }
+ }
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL)))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS)))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
+
+ if vxlan.GBP {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, boolAttr(vxlan.GBP))
+ }
+
+ if vxlan.NoAge {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
+ } else if vxlan.Age > 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age)))
+ }
+ if vxlan.Limit > 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
+ }
+ if vxlan.Port > 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, nl.Uint16Attr(uint16(vxlan.Port)))
+ }
+ if vxlan.PortLow > 0 || vxlan.PortHigh > 0 {
+ pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)}
+
+ buf := new(bytes.Buffer)
+ binary.Write(buf, binary.BigEndian, &pr)
+
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes())
+ }
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken from the parameters in the link object.
+// Equivalent to: `ip link add $link`
+func LinkAdd(link Link) error {
+ // TODO: set mtu and hardware address
+ // TODO: support extra data for macvlan
+ base := link.Attrs()
+
+ if base.Name == "" {
+ return fmt.Errorf("LinkAttrs.Name cannot be empty!")
+ }
+
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ req.AddData(msg)
+
+ if base.ParentIndex != 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(base.ParentIndex))
+ data := nl.NewRtAttr(syscall.IFLA_LINK, b)
+ req.AddData(data)
+ } else if link.Type() == "ipvlan" {
+ return fmt.Errorf("Can't create ipvlan link without ParentIndex")
+ }
+
+ nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name))
+ req.AddData(nameData)
+
+ if base.MTU > 0 {
+ mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+ req.AddData(mtu)
+ }
+
+ if base.TxQLen >= 0 {
+ qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
+ req.AddData(qlen)
+ }
+
+ if base.Namespace != nil {
+ var attr *nl.RtAttr
+ switch base.Namespace.(type) {
+ case NsPid:
+ val := nl.Uint32Attr(uint32(base.Namespace.(NsPid)))
+ attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val)
+ case NsFd:
+ val := nl.Uint32Attr(uint32(base.Namespace.(NsFd)))
+ attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val)
+ }
+
+ req.AddData(attr)
+ }
+
+ linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
+ nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
+
+ if vlan, ok := link.(*Vlan); ok {
+ b := make([]byte, 2)
+ native.PutUint16(b, uint16(vlan.VlanId))
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
+ } else if veth, ok := link.(*Veth); ok {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
+ nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC)
+ nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName))
+ if base.TxQLen >= 0 {
+ nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
+ }
+ if base.MTU > 0 {
+ nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+ }
+
+ } else if vxlan, ok := link.(*Vxlan); ok {
+ addVxlanAttrs(vxlan, linkInfo)
+ } else if ipv, ok := link.(*IPVlan); ok {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode)))
+ } else if macv, ok := link.(*Macvlan); ok {
+ if macv.Mode != MACVLAN_MODE_DEFAULT {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
+ }
+ }
+
+ req.AddData(linkInfo)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return err
+ }
+
+ ensureIndex(base)
+
+ // can't set master during create, so set it afterwards
+ if base.MasterIndex != 0 {
+ // TODO: verify MasterIndex is actually a bridge?
+ return LinkSetMasterByIndex(link, base.MasterIndex)
+ }
+ return nil
+}
+
+// LinkDel deletes link device. Either Index or Name must be set in
+// the link object for it to be deleted. The other values are ignored.
+// Equivalent to: `ip link del $link`
+func LinkDel(link Link) error {
+ base := link.Attrs()
+
+ ensureIndex(base)
+
+ req := nl.NewNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func linkByNameDump(name string) (Link, error) {
+ links, err := LinkList()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, link := range links {
+ if link.Attrs().Name == name {
+ return link, nil
+ }
+ }
+ return nil, fmt.Errorf("Link %s not found", name)
+}
+
+// LinkByName finds a link by name and returns a pointer to the object.
+func LinkByName(name string) (Link, error) {
+ if lookupByDump {
+ return linkByNameDump(name)
+ }
+
+ req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ req.AddData(msg)
+
+ nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name))
+ req.AddData(nameData)
+
+ link, err := execGetLink(req)
+ if err == syscall.EINVAL {
+ // older kernels don't support looking up via IFLA_IFNAME
+ // so fall back to dumping all links
+ lookupByDump = true
+ return linkByNameDump(name)
+ }
+
+ return link, err
+}
+
+// LinkByIndex finds a link by index and returns a pointer to the object.
+func LinkByIndex(index int) (Link, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(index)
+ req.AddData(msg)
+
+ return execGetLink(req)
+}
+
+func execGetLink(req *nl.NetlinkRequest) (Link, error) {
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ if errno, ok := err.(syscall.Errno); ok {
+ if errno == syscall.ENODEV {
+ return nil, fmt.Errorf("Link not found")
+ }
+ }
+ return nil, err
+ }
+
+ switch {
+ case len(msgs) == 0:
+ return nil, fmt.Errorf("Link not found")
+
+ case len(msgs) == 1:
+ return linkDeserialize(msgs[0])
+
+ default:
+ return nil, fmt.Errorf("More than one link found")
+ }
+}
+
+// linkDeserialize deserializes a raw message received from netlink into
+// a link object.
+func linkDeserialize(m []byte) (Link, error) {
+ msg := nl.DeserializeIfInfomsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ base := LinkAttrs{Index: int(msg.Index), Flags: linkFlags(msg.Flags)}
+ var link Link
+ linkType := ""
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.IFLA_LINKINFO:
+ infos, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ for _, info := range infos {
+ switch info.Attr.Type {
+ case nl.IFLA_INFO_KIND:
+ linkType = string(info.Value[:len(info.Value)-1])
+ switch linkType {
+ case "dummy":
+ link = &Dummy{}
+ case "ifb":
+ link = &Ifb{}
+ case "bridge":
+ link = &Bridge{}
+ case "vlan":
+ link = &Vlan{}
+ case "veth":
+ link = &Veth{}
+ case "vxlan":
+ link = &Vxlan{}
+ case "ipvlan":
+ link = &IPVlan{}
+ case "macvlan":
+ link = &Macvlan{}
+ case "macvtap":
+ link = &Macvtap{}
+ default:
+ link = &GenericLink{LinkType: linkType}
+ }
+ case nl.IFLA_INFO_DATA:
+ data, err := nl.ParseRouteAttr(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ switch linkType {
+ case "vlan":
+ parseVlanData(link, data)
+ case "vxlan":
+ parseVxlanData(link, data)
+ case "ipvlan":
+ parseIPVlanData(link, data)
+ case "macvlan":
+ parseMacvlanData(link, data)
+ case "macvtap":
+ parseMacvtapData(link, data)
+ }
+ }
+ }
+ case syscall.IFLA_ADDRESS:
+ var nonzero bool
+ for _, b := range attr.Value {
+ if b != 0 {
+ nonzero = true
+ }
+ }
+ if nonzero {
+ base.HardwareAddr = attr.Value[:]
+ }
+ case syscall.IFLA_IFNAME:
+ base.Name = string(attr.Value[:len(attr.Value)-1])
+ case syscall.IFLA_MTU:
+ base.MTU = int(native.Uint32(attr.Value[0:4]))
+ case syscall.IFLA_LINK:
+ base.ParentIndex = int(native.Uint32(attr.Value[0:4]))
+ case syscall.IFLA_MASTER:
+ base.MasterIndex = int(native.Uint32(attr.Value[0:4]))
+ case syscall.IFLA_TXQLEN:
+ base.TxQLen = int(native.Uint32(attr.Value[0:4]))
+ }
+ }
+ // Links that don't have IFLA_INFO_KIND are hardware devices
+ if link == nil {
+ link = &Device{}
+ }
+ *link.Attrs() = base
+
+ return link, nil
+}
+
+// LinkList gets a list of link devices.
+// Equivalent to: `ip link show`
+func LinkList() ([]Link, error) {
+ // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need
+ // to get the message ourselves to parse link type.
+ req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Link
+ for _, m := range msgs {
+ link, err := linkDeserialize(m)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, link)
+ }
+
+ return res, nil
+}
+
+func LinkSetHairpin(link Link, mode bool) error {
+ return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE)
+}
+
+func LinkSetGuard(link Link, mode bool) error {
+ return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
+}
+
+func LinkSetFastLeave(link Link, mode bool) error {
+ return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE)
+}
+
+func LinkSetLearning(link Link, mode bool) error {
+ return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING)
+}
+
+func LinkSetRootBlock(link Link, mode bool) error {
+ return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT)
+}
+
+func LinkSetFlood(link Link, mode bool) error {
+ return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
+}
+
+func setProtinfoAttr(link Link, mode bool, attr int) error {
+ base := link.Attrs()
+ ensureIndex(base)
+ req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil)
+ nl.NewRtAttrChild(br, attr, boolToByte(mode))
+ req.AddData(br)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ vlan := link.(*Vlan)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_VLAN_ID:
+ vlan.VlanId = int(native.Uint16(datum.Value[0:2]))
+ }
+ }
+}
+
+func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ vxlan := link.(*Vxlan)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_VXLAN_ID:
+ vxlan.VxlanId = int(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_VXLAN_LINK:
+ vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_VXLAN_LOCAL:
+ vxlan.SrcAddr = net.IP(datum.Value[0:4])
+ case nl.IFLA_VXLAN_LOCAL6:
+ vxlan.SrcAddr = net.IP(datum.Value[0:16])
+ case nl.IFLA_VXLAN_GROUP:
+ vxlan.Group = net.IP(datum.Value[0:4])
+ case nl.IFLA_VXLAN_GROUP6:
+ vxlan.Group = net.IP(datum.Value[0:16])
+ case nl.IFLA_VXLAN_TTL:
+ vxlan.TTL = int(datum.Value[0])
+ case nl.IFLA_VXLAN_TOS:
+ vxlan.TOS = int(datum.Value[0])
+ case nl.IFLA_VXLAN_LEARNING:
+ vxlan.Learning = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_PROXY:
+ vxlan.Proxy = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_RSC:
+ vxlan.RSC = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_L2MISS:
+ vxlan.L2miss = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_L3MISS:
+ vxlan.L3miss = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_GBP:
+ vxlan.GBP = int8(datum.Value[0]) != 0
+ case nl.IFLA_VXLAN_AGEING:
+ vxlan.Age = int(native.Uint32(datum.Value[0:4]))
+ vxlan.NoAge = vxlan.Age == 0
+ case nl.IFLA_VXLAN_LIMIT:
+ vxlan.Limit = int(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_VXLAN_PORT:
+ vxlan.Port = int(native.Uint16(datum.Value[0:2]))
+ case nl.IFLA_VXLAN_PORT_RANGE:
+ buf := bytes.NewBuffer(datum.Value[0:4])
+ var pr vxlanPortRange
+ if binary.Read(buf, binary.BigEndian, &pr) == nil { // only use the values if the read succeeded
+ vxlan.PortLow = int(pr.Lo)
+ vxlan.PortHigh = int(pr.Hi)
+ }
+ }
+ }
+}
+
+func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ ipv := link.(*IPVlan)
+ for _, datum := range data {
+ if datum.Attr.Type == nl.IFLA_IPVLAN_MODE {
+ ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4]))
+ return
+ }
+ }
+}
+
+func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) {
+ macv := link.(*Macvtap)
+ parseMacvlanData(&macv.Macvlan, data)
+}
+
+func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) {
+ macv := link.(*Macvlan)
+ for _, datum := range data {
+ if datum.Attr.Type == nl.IFLA_MACVLAN_MODE {
+ switch native.Uint32(datum.Value[0:4]) {
+ case nl.MACVLAN_MODE_PRIVATE:
+ macv.Mode = MACVLAN_MODE_PRIVATE
+ case nl.MACVLAN_MODE_VEPA:
+ macv.Mode = MACVLAN_MODE_VEPA
+ case nl.MACVLAN_MODE_BRIDGE:
+ macv.Mode = MACVLAN_MODE_BRIDGE
+ case nl.MACVLAN_MODE_PASSTHRU:
+ macv.Mode = MACVLAN_MODE_PASSTHRU
+ case nl.MACVLAN_MODE_SOURCE:
+ macv.Mode = MACVLAN_MODE_SOURCE
+ }
+ return
+ }
+ }
+}
+
+// copied from pkg/net_linux.go
+func linkFlags(rawFlags uint32) net.Flags {
+ var f net.Flags
+ if rawFlags&syscall.IFF_UP != 0 {
+ f |= net.FlagUp
+ }
+ if rawFlags&syscall.IFF_BROADCAST != 0 {
+ f |= net.FlagBroadcast
+ }
+ if rawFlags&syscall.IFF_LOOPBACK != 0 {
+ f |= net.FlagLoopback
+ }
+ if rawFlags&syscall.IFF_POINTOPOINT != 0 {
+ f |= net.FlagPointToPoint
+ }
+ if rawFlags&syscall.IFF_MULTICAST != 0 {
+ f |= net.FlagMulticast
+ }
+ return f
+}
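
As a rough usage sketch of the functions above (illustration only; it needs root on a linux kernel, and "br0" is an assumed name): LinkAdd creates the device described by the concrete struct, and the LinkSet* helpers mutate it afterwards.

    package main

    import (
        "log"

        "github.com/vishvananda/netlink"
    )

    func main() {
        attrs := netlink.NewLinkAttrs() // TxQLen defaults to -1 (kernel default)
        attrs.Name = "br0"              // assumed bridge name
        br := &netlink.Bridge{LinkAttrs: attrs}
        if err := netlink.LinkAdd(br); err != nil { // ip link add br0 type bridge
            log.Fatal(err)
        }
        if err := netlink.LinkSetMTU(br, 1400); err != nil { // ip link set br0 mtu 1400
            log.Fatal(err)
        }
        if err := netlink.LinkSetUp(br); err != nil { // ip link set br0 up
            log.Fatal(err)
        }
    }
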
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh.go
new file mode 100644
index 0000000..0e5eb90
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh.go
@@ -0,0 +1,22 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Neigh represents a link layer neighbor from netlink.
+type Neigh struct {
+ LinkIndex int
+ Family int
+ State int
+ Type int
+ Flags int
+ IP net.IP
+ HardwareAddr net.HardwareAddr
+}
+
+// String returns $ip/$hwaddr $label
+func (neigh *Neigh) String() string {
+ return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr)
+}
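
A tiny sketch of how a Neigh is built and what String() prints (the index, IP, and MAC are illustrative values, not from the source):

    package main

    import (
        "fmt"
        "net"

        "github.com/vishvananda/netlink"
    )

    func main() {
        hw, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff") // illustrative MAC
        n := netlink.Neigh{
            LinkIndex:    2, // hypothetical interface index
            IP:           net.ParseIP("10.0.0.2"),
            HardwareAddr: hw,
        }
        fmt.Println(n.String()) // "10.0.0.2 aa:bb:cc:dd:ee:ff"
    }
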
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh_linux.go
new file mode 100644
index 0000000..620a0ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh_linux.go
@@ -0,0 +1,189 @@
+package netlink
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+const (
+ NDA_UNSPEC = iota
+ NDA_DST
+ NDA_LLADDR
+ NDA_CACHEINFO
+ NDA_PROBES
+ NDA_VLAN
+ NDA_PORT
+ NDA_VNI
+ NDA_IFINDEX
+ NDA_MAX = NDA_IFINDEX
+)
+
+// Neighbor Cache Entry States.
+const (
+ NUD_NONE = 0x00
+ NUD_INCOMPLETE = 0x01
+ NUD_REACHABLE = 0x02
+ NUD_STALE = 0x04
+ NUD_DELAY = 0x08
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+)
+
+// Neighbor Flags
+const (
+ NTF_USE = 0x01
+ NTF_SELF = 0x02
+ NTF_MASTER = 0x04
+ NTF_PROXY = 0x08
+ NTF_ROUTER = 0x80
+)
+
+type Ndmsg struct {
+ Family uint8
+ Index uint32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
+func deserializeNdmsg(b []byte) *Ndmsg {
+ var dummy Ndmsg
+ return (*Ndmsg)(unsafe.Pointer(&b[0:unsafe.Sizeof(dummy)][0]))
+}
+
+func (msg *Ndmsg) Serialize() []byte {
+ return (*(*[unsafe.Sizeof(*msg)]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *Ndmsg) Len() int {
+ return int(unsafe.Sizeof(*msg))
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func NeighAdd(neigh *Neigh) error {
+ return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
+}
+
+// NeighSet will add or replace an IP to MAC mapping in the ARP table
+// Equivalent to: `ip neigh replace ...`
+func NeighSet(neigh *Neigh) error {
+ return neighAdd(neigh, syscall.NLM_F_CREATE)
+}
+
+// NeighAppend will append an entry to the FDB
+// Equivalent to: `bridge fdb append ...`
+func NeighAppend(neigh *Neigh) error {
+ return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
+}
+
+func neighAdd(neigh *Neigh, mode int) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
+ return neighHandle(neigh, req)
+}
+
+// NeighDel will delete an IP to MAC mapping from the ARP table.
+// Equivalent to: `ip neigh del ...`
+func NeighDel(neigh *Neigh) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
+ return neighHandle(neigh, req)
+}
+
+func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
+ var family int
+ if neigh.Family > 0 {
+ family = neigh.Family
+ } else {
+ family = nl.GetIPFamily(neigh.IP)
+ }
+
+ msg := Ndmsg{
+ Family: uint8(family),
+ Index: uint32(neigh.LinkIndex),
+ State: uint16(neigh.State),
+ Type: uint8(neigh.Type),
+ Flags: uint8(neigh.Flags),
+ }
+ req.AddData(&msg)
+
+ ipData := neigh.IP.To4()
+ if ipData == nil {
+ ipData = neigh.IP.To16()
+ }
+
+ dstData := nl.NewRtAttr(NDA_DST, ipData)
+ req.AddData(dstData)
+
+ hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr))
+ req.AddData(hwData)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
+ msg := Ndmsg{
+ Family: uint8(family),
+ }
+ req.AddData(&msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Neigh
+ for _, m := range msgs {
+ ndm := deserializeNdmsg(m)
+ if linkIndex != 0 && int(ndm.Index) != linkIndex {
+ // Ignore messages from other interfaces
+ continue
+ }
+
+ neigh, err := NeighDeserialize(m)
+ if err != nil {
+ continue
+ }
+
+ res = append(res, *neigh)
+ }
+
+ return res, nil
+}
+
+func NeighDeserialize(m []byte) (*Neigh, error) {
+ msg := deserializeNdmsg(m)
+
+ neigh := Neigh{
+ LinkIndex: int(msg.Index),
+ Family: int(msg.Family),
+ State: int(msg.State),
+ Type: int(msg.Type),
+ Flags: int(msg.Flags),
+ }
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case NDA_DST:
+ neigh.IP = net.IP(attr.Value)
+ case NDA_LLADDR:
+ neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+ }
+ }
+
+ return &neigh, nil
+}
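
A hedged end-to-end sketch of the API above (requires root on linux; the ifindex and addresses are assumptions): add a permanent ARP entry, then dump the IPv4 neighbor table for that interface.

    package main

    import (
        "log"
        "net"

        "github.com/vishvananda/netlink"
    )

    func main() {
        hw, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff")
        neigh := &netlink.Neigh{
            LinkIndex:    2, // hypothetical ifindex, e.g. LinkByName("eth0").Attrs().Index
            State:        netlink.NUD_PERMANENT,
            IP:           net.ParseIP("10.0.0.2"),
            HardwareAddr: hw,
        }
        if err := netlink.NeighAdd(neigh); err != nil { // ip neigh add ...
            log.Fatal(err)
        }
        entries, err := netlink.NeighList(2, netlink.FAMILY_V4) // ip neighbor show
        if err != nil {
            log.Fatal(err)
        }
        for _, e := range entries {
            log.Println(e.String())
        }
    }
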
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink.go
new file mode 100644
index 0000000..41ebdb1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink.go
@@ -0,0 +1,39 @@
+// Package netlink provides a simple library for netlink. Netlink is
+// the interface a user-space program in linux uses to communicate with
+// the kernel. It can be used to add and remove interfaces, set up ip
+// addresses and routes, and configure ipsec. Netlink communication
+// requires elevated privileges, so in most cases this code needs to
+// be run as root. The low level primitives for netlink are contained
+// in the nl subpackage. This package attempts to provide a high-level
+// interface that is loosely modeled on the iproute2 cli.
+package netlink
+
+import (
+ "net"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+const (
+ // Family type definitions
+ FAMILY_ALL = nl.FAMILY_ALL
+ FAMILY_V4 = nl.FAMILY_V4
+ FAMILY_V6 = nl.FAMILY_V6
+)
+
+// ParseIPNet parses a string in ip/net format and returns a net.IPNet.
+// This is valuable because addresses in netlink are often IPNets and
+// ParseCIDR returns an IPNet with the IP part set to the base IP of the
+// range.
+func ParseIPNet(s string) (*net.IPNet, error) {
+ ip, ipNet, err := net.ParseCIDR(s)
+ if err != nil {
+ return nil, err
+ }
+ return &net.IPNet{IP: ip, Mask: ipNet.Mask}, nil
+}
+
+// NewIPNet generates an IPNet from an ip address using a netmask of 32.
+func NewIPNet(ip net.IP) *net.IPNet {
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(32, 32)}
+}
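
The difference ParseIPNet's doc comment describes is easy to miss; a small sketch contrasting it with net.ParseCIDR:

    package main

    import (
        "fmt"
        "log"
        "net"

        "github.com/vishvananda/netlink"
    )

    func main() {
        addr, err := netlink.ParseIPNet("192.168.1.10/24")
        if err != nil {
            log.Fatal(err)
        }
        _, cidr, _ := net.ParseCIDR("192.168.1.10/24")
        fmt.Println(addr) // 192.168.1.10/24 -- host part preserved
        fmt.Println(cidr) // 192.168.1.0/24  -- base address of the range
    }
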
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink_unspecified.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink_unspecified.go
new file mode 100644
index 0000000..10c49c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -0,0 +1,143 @@
+// +build !linux
+
+package netlink
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotImplemented = errors.New("not implemented")
+)
+
+func LinkSetUp(link *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetDown(link *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetMTU(link *Link, mtu int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetMaster(link *Link, master *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetNsPid(link *Link, nspid int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetNsFd(link *Link, fd int) error {
+ return ErrNotImplemented
+}
+
+func LinkAdd(link *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkDel(link *Link) error {
+ return ErrNotImplemented
+}
+
+func SetHairpin(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetGuard(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetFastLeave(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetLearning(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetRootBlock(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetFlood(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkList() ([]Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func AddrAdd(link *Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func AddrDel(link *Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func AddrList(link *Link, family int) ([]Addr, error) {
+ return nil, ErrNotImplemented
+}
+
+func RouteAdd(route *Route) error {
+ return ErrNotImplemented
+}
+
+func RouteDel(route *Route) error {
+ return ErrNotImplemented
+}
+
+func RouteList(link *Link, family int) ([]Route, error) {
+ return nil, ErrNotImplemented
+}
+
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+ return ErrNotImplemented
+}
+
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+ return ErrNotImplemented
+}
+
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ return nil, ErrNotImplemented
+}
+
+func XfrmStateAdd(policy *XfrmState) error {
+ return ErrNotImplemented
+}
+
+func XfrmStateDel(policy *XfrmState) error {
+ return ErrNotImplemented
+}
+
+func XfrmStateList(family int) ([]XfrmState, error) {
+ return nil, ErrNotImplemented
+}
+
+func NeighAdd(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighSet(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighAppend(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighDel(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+ return nil, ErrNotImplemented
+}
+
+func NeighDeserialize(m []byte) (*Neigh, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/addr_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/addr_linux.go
new file mode 100644
index 0000000..17088fa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/addr_linux.go
@@ -0,0 +1,47 @@
+package nl
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type IfAddrmsg struct {
+ syscall.IfAddrmsg
+}
+
+func NewIfAddrmsg(family int) *IfAddrmsg {
+ return &IfAddrmsg{
+ IfAddrmsg: syscall.IfAddrmsg{
+ Family: uint8(family),
+ },
+ }
+}
+
+// struct ifaddrmsg {
+// __u8 ifa_family;
+// __u8 ifa_prefixlen; /* The prefix length */
+// __u8 ifa_flags; /* Flags */
+// __u8 ifa_scope; /* Address scope */
+// __u32 ifa_index; /* Link index */
+// };
+
+// type IfAddrmsg struct {
+// Family uint8
+// Prefixlen uint8
+// Flags uint8
+// Scope uint8
+// Index uint32
+// }
+// SizeofIfAddrmsg = 0x8
+
+func DeserializeIfAddrmsg(b []byte) *IfAddrmsg {
+ return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0]))
+}
+
+func (msg *IfAddrmsg) Serialize() []byte {
+ return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfAddrmsg) Len() int {
+ return syscall.SizeofIfAddrmsg
+}
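
A round-trip sketch for the serializer pair above (linux only; no privileges needed, since no socket is touched):

    package main

    import (
        "fmt"
        "syscall"

        "github.com/vishvananda/netlink/nl"
    )

    func main() {
        msg := nl.NewIfAddrmsg(syscall.AF_INET)
        msg.Prefixlen = 24
        wire := msg.Serialize()                // SizeofIfAddrmsg (8) bytes
        back := nl.DeserializeIfAddrmsg(wire)  // pointer view over the same bytes
        fmt.Println(len(wire), back.Prefixlen) // 8 24
    }
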
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/link_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/link_linux.go
new file mode 100644
index 0000000..1f9ab08
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/link_linux.go
@@ -0,0 +1,104 @@
+package nl
+
+const (
+ DEFAULT_CHANGE = 0xFFFFFFFF
+)
+
+const (
+ IFLA_INFO_UNSPEC = iota
+ IFLA_INFO_KIND
+ IFLA_INFO_DATA
+ IFLA_INFO_XSTATS
+ IFLA_INFO_MAX = IFLA_INFO_XSTATS
+)
+
+const (
+ IFLA_VLAN_UNSPEC = iota
+ IFLA_VLAN_ID
+ IFLA_VLAN_FLAGS
+ IFLA_VLAN_EGRESS_QOS
+ IFLA_VLAN_INGRESS_QOS
+ IFLA_VLAN_PROTOCOL
+ IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL
+)
+
+const (
+ VETH_INFO_UNSPEC = iota
+ VETH_INFO_PEER
+ VETH_INFO_MAX = VETH_INFO_PEER
+)
+
+const (
+ IFLA_VXLAN_UNSPEC = iota
+ IFLA_VXLAN_ID
+ IFLA_VXLAN_GROUP
+ IFLA_VXLAN_LINK
+ IFLA_VXLAN_LOCAL
+ IFLA_VXLAN_TTL
+ IFLA_VXLAN_TOS
+ IFLA_VXLAN_LEARNING
+ IFLA_VXLAN_AGEING
+ IFLA_VXLAN_LIMIT
+ IFLA_VXLAN_PORT_RANGE
+ IFLA_VXLAN_PROXY
+ IFLA_VXLAN_RSC
+ IFLA_VXLAN_L2MISS
+ IFLA_VXLAN_L3MISS
+ IFLA_VXLAN_PORT
+ IFLA_VXLAN_GROUP6
+ IFLA_VXLAN_LOCAL6
+ IFLA_VXLAN_UDP_CSUM
+ IFLA_VXLAN_UDP_ZERO_CSUM6_TX
+ IFLA_VXLAN_UDP_ZERO_CSUM6_RX
+ IFLA_VXLAN_REMCSUM_TX
+ IFLA_VXLAN_REMCSUM_RX
+ IFLA_VXLAN_GBP
+ IFLA_VXLAN_REMCSUM_NOPARTIAL
+ IFLA_VXLAN_FLOWBASED
+ IFLA_VXLAN_MAX = IFLA_VXLAN_FLOWBASED
+)
+
+const (
+ BRIDGE_MODE_UNSPEC = iota
+ BRIDGE_MODE_HAIRPIN
+)
+
+const (
+ IFLA_BRPORT_UNSPEC = iota
+ IFLA_BRPORT_STATE
+ IFLA_BRPORT_PRIORITY
+ IFLA_BRPORT_COST
+ IFLA_BRPORT_MODE
+ IFLA_BRPORT_GUARD
+ IFLA_BRPORT_PROTECT
+ IFLA_BRPORT_FAST_LEAVE
+ IFLA_BRPORT_LEARNING
+ IFLA_BRPORT_UNICAST_FLOOD
+ IFLA_BRPORT_MAX = IFLA_BRPORT_UNICAST_FLOOD
+)
+
+const (
+ IFLA_IPVLAN_UNSPEC = iota
+ IFLA_IPVLAN_MODE
+ IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE
+)
+
+const (
+ // not defined in syscall
+ IFLA_NET_NS_FD = 28
+)
+
+const (
+ IFLA_MACVLAN_UNSPEC = iota
+ IFLA_MACVLAN_MODE
+ IFLA_MACVLAN_FLAGS
+ IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS
+)
+
+const (
+ MACVLAN_MODE_PRIVATE = 1
+ MACVLAN_MODE_VEPA = 2
+ MACVLAN_MODE_BRIDGE = 4
+ MACVLAN_MODE_PASSTHRU = 8
+ MACVLAN_MODE_SOURCE = 16
+)
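
A sketch of how these constants combine with the RtAttr helpers from nl_linux.go to build the nested IFLA_LINKINFO blob that LinkAdd sends (illustration only; linux only):

    package main

    import (
        "fmt"
        "syscall"

        "github.com/vishvananda/netlink/nl"
    )

    func main() {
        linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
        nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated("macvlan"))
        data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
        nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(nl.MACVLAN_MODE_BRIDGE))
        fmt.Println(len(linkInfo.Serialize())) // total aligned length of the nested attribute
    }
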
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/nl_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/nl_linux.go
new file mode 100644
index 0000000..8dbd92b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -0,0 +1,418 @@
+// Package nl has low level primitives for making Netlink calls.
+package nl
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // Family type definitions
+ FAMILY_ALL = syscall.AF_UNSPEC
+ FAMILY_V4 = syscall.AF_INET
+ FAMILY_V6 = syscall.AF_INET6
+)
+
+var nextSeqNr uint32
+
+// GetIPFamily returns the family type of a net.IP.
+func GetIPFamily(ip net.IP) int {
+ if len(ip) <= net.IPv4len {
+ return FAMILY_V4
+ }
+ if ip.To4() != nil {
+ return FAMILY_V4
+ }
+ return FAMILY_V6
+}
+
+var nativeEndian binary.ByteOrder
+
+// Get native endianness for the system
+func NativeEndian() binary.ByteOrder {
+ if nativeEndian == nil {
+ var x uint32 = 0x01020304
+ if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+ nativeEndian = binary.BigEndian
+ } else {
+ nativeEndian = binary.LittleEndian
+ }
+ }
+ return nativeEndian
+}
+
+// Byte swap a 16 bit value if we aren't big endian
+func Swap16(i uint16) uint16 {
+ if NativeEndian() == binary.BigEndian {
+ return i
+ }
+ return (i&0xff00)>>8 | (i&0xff)<<8
+}
+
+// Byte swap a 32 bit value if we aren't big endian
+func Swap32(i uint32) uint32 {
+ if NativeEndian() == binary.BigEndian {
+ return i
+ }
+ return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24
+}
+
+type NetlinkRequestData interface {
+ Len() int
+ Serialize() []byte
+}
+
+// IfInfomsg is related to links, but it is used for list requests as well
+type IfInfomsg struct {
+ syscall.IfInfomsg
+}
+
+// Create an IfInfomsg with family specified
+func NewIfInfomsg(family int) *IfInfomsg {
+ return &IfInfomsg{
+ IfInfomsg: syscall.IfInfomsg{
+ Family: uint8(family),
+ },
+ }
+}
+
+func DeserializeIfInfomsg(b []byte) *IfInfomsg {
+ return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0]))
+}
+
+func (msg *IfInfomsg) Serialize() []byte {
+ return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfInfomsg) Len() int {
+ return syscall.SizeofIfInfomsg
+}
+
+func rtaAlignOf(attrlen int) int {
+ return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1)
+}
+
+func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg {
+ msg := NewIfInfomsg(family)
+ parent.children = append(parent.children, msg)
+ return msg
+}
+
+// Extend RtAttr to handle data and children
+type RtAttr struct {
+ syscall.RtAttr
+ Data []byte
+ children []NetlinkRequestData
+}
+
+// Create a new Extended RtAttr object
+func NewRtAttr(attrType int, data []byte) *RtAttr {
+ return &RtAttr{
+ RtAttr: syscall.RtAttr{
+ Type: uint16(attrType),
+ },
+ children: []NetlinkRequestData{},
+ Data: data,
+ }
+}
+
+// Create a new RtAttr obj and add it as a child of an existing object
+func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
+ attr := NewRtAttr(attrType, data)
+ parent.children = append(parent.children, attr)
+ return attr
+}
+
+func (a *RtAttr) Len() int {
+ if len(a.children) == 0 {
+ return (syscall.SizeofRtAttr + len(a.Data))
+ }
+
+ l := 0
+ for _, child := range a.children {
+ l += rtaAlignOf(child.Len())
+ }
+ l += syscall.SizeofRtAttr
+ return rtaAlignOf(l + len(a.Data))
+}
+
+// Serialize the RtAttr into a byte array
+// This can't just unsafe.cast because it must iterate through children.
+func (a *RtAttr) Serialize() []byte {
+ native := NativeEndian()
+
+ length := a.Len()
+ buf := make([]byte, rtaAlignOf(length))
+
+ if a.Data != nil {
+ copy(buf[4:], a.Data)
+ } else {
+ next := 4
+ for _, child := range a.children {
+ childBuf := child.Serialize()
+ copy(buf[next:], childBuf)
+ next += rtaAlignOf(len(childBuf))
+ }
+ }
+
+ if l := uint16(length); l != 0 {
+ native.PutUint16(buf[0:2], l)
+ }
+ native.PutUint16(buf[2:4], a.Type)
+ return buf
+}
+
+type NetlinkRequest struct {
+ syscall.NlMsghdr
+ Data []NetlinkRequestData
+}
+
+// Serialize the Netlink Request into a byte array
+func (req *NetlinkRequest) Serialize() []byte {
+ length := syscall.SizeofNlMsghdr
+ dataBytes := make([][]byte, len(req.Data))
+ for i, data := range req.Data {
+ dataBytes[i] = data.Serialize()
+ length = length + len(dataBytes[i])
+ }
+ req.Len = uint32(length)
+ b := make([]byte, length)
+ hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:]
+ next := syscall.SizeofNlMsghdr
+ copy(b[0:next], hdr)
+ for _, data := range dataBytes {
+ for _, dataByte := range data {
+ b[next] = dataByte
+ next = next + 1
+ }
+ }
+ return b
+}
+
+func (req *NetlinkRequest) AddData(data NetlinkRequestData) {
+ if data != nil {
+ req.Data = append(req.Data, data)
+ }
+}
+
+// Execute the request against the given sockType.
+// Returns a list of netlink messages in serialized format, optionally filtered
+// by resType.
+func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
+ s, err := getNetlinkSocket(sockType)
+ if err != nil {
+ return nil, err
+ }
+ defer s.Close()
+
+ if err := s.Send(req); err != nil {
+ return nil, err
+ }
+
+ pid, err := s.GetPid()
+ if err != nil {
+ return nil, err
+ }
+
+ var res [][]byte
+
+done:
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ return nil, err
+ }
+ for _, m := range msgs {
+ if m.Header.Seq != req.Seq {
+ return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
+ }
+ if m.Header.Pid != pid {
+ return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
+ }
+ if m.Header.Type == syscall.NLMSG_DONE {
+ break done
+ }
+ if m.Header.Type == syscall.NLMSG_ERROR {
+ native := NativeEndian()
+ errno := int32(native.Uint32(m.Data[0:4]))
+ if errno == 0 {
+ break done
+ }
+ return nil, syscall.Errno(-errno)
+ }
+ if resType != 0 && m.Header.Type != resType {
+ continue
+ }
+ res = append(res, m.Data)
+ if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
+ break done
+ }
+ }
+ }
+ return res, nil
+}
+
+// Create a new netlink request from proto and flags
+// Note the Len value will be inaccurate once data is added until
+// the message is serialized
+func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
+ return &NetlinkRequest{
+ NlMsghdr: syscall.NlMsghdr{
+ Len: uint32(syscall.SizeofNlMsghdr),
+ Type: uint16(proto),
+ Flags: syscall.NLM_F_REQUEST | uint16(flags),
+ Seq: atomic.AddUint32(&nextSeqNr, 1),
+ },
+ }
+}
+
+type NetlinkSocket struct {
+ fd int
+ lsa syscall.SockaddrNetlink
+}
+
+func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
+ fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+ if err != nil {
+ return nil, err
+ }
+ s := &NetlinkSocket{
+ fd: fd,
+ }
+ s.lsa.Family = syscall.AF_NETLINK
+ if err := syscall.Bind(fd, &s.lsa); err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE)
+// and subscribe it to multicast groups passed in variable argument list.
+// Returns the netlink socket on which the Receive() method can be called
+// to retrieve the messages from the kernel.
+func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
+ fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+ if err != nil {
+ return nil, err
+ }
+ s := &NetlinkSocket{
+ fd: fd,
+ }
+ s.lsa.Family = syscall.AF_NETLINK
+
+ for _, g := range groups {
+ s.lsa.Groups |= (1 << (g - 1))
+ }
+
+ if err := syscall.Bind(fd, &s.lsa); err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+
+ return s, nil
+}
+
+func (s *NetlinkSocket) Close() {
+ syscall.Close(s.fd)
+}
+
+func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
+ if err := syscall.Sendto(s.fd, request.Serialize(), 0, &s.lsa); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
+ rb := make([]byte, syscall.Getpagesize())
+ nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
+ if err != nil {
+ return nil, err
+ }
+ if nr < syscall.NLMSG_HDRLEN {
+ return nil, fmt.Errorf("Got short response from netlink")
+ }
+ rb = rb[:nr]
+ return syscall.ParseNetlinkMessage(rb)
+}
+
+func (s *NetlinkSocket) GetPid() (uint32, error) {
+ lsa, err := syscall.Getsockname(s.fd)
+ if err != nil {
+ return 0, err
+ }
+ switch v := lsa.(type) {
+ case *syscall.SockaddrNetlink:
+ return v.Pid, nil
+ }
+ return 0, fmt.Errorf("Wrong socket type")
+}
+
+func ZeroTerminated(s string) []byte {
+ bytes := make([]byte, len(s)+1)
+ for i := 0; i < len(s); i++ {
+ bytes[i] = s[i]
+ }
+ bytes[len(s)] = 0
+ return bytes
+}
+
+func NonZeroTerminated(s string) []byte {
+ bytes := make([]byte, len(s))
+ for i := 0; i < len(s); i++ {
+ bytes[i] = s[i]
+ }
+ return bytes
+}
+
+func BytesToString(b []byte) string {
+ n := bytes.Index(b, []byte{0})
+ if n == -1 {
+ // no NUL terminator; treat the whole slice as the string
+ return string(b)
+ }
+ return string(b[:n])
+}
+
+func Uint8Attr(v uint8) []byte {
+ return []byte{byte(v)}
+}
+
+func Uint16Attr(v uint16) []byte {
+ native := NativeEndian()
+ bytes := make([]byte, 2)
+ native.PutUint16(bytes, v)
+ return bytes
+}
+
+func Uint32Attr(v uint32) []byte {
+ native := NativeEndian()
+ bytes := make([]byte, 4)
+ native.PutUint32(bytes, v)
+ return bytes
+}
+
+func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) {
+ var attrs []syscall.NetlinkRouteAttr
+ for len(b) >= syscall.SizeofRtAttr {
+ a, vbuf, alen, err := netlinkRouteAttrAndValue(b)
+ if err != nil {
+ return nil, err
+ }
+ ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]}
+ attrs = append(attrs, ra)
+ b = b[alen:]
+ }
+ return attrs, nil
+}
+
+func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) {
+ a := (*syscall.RtAttr)(unsafe.Pointer(&b[0]))
+ if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) {
+ return nil, nil, 0, syscall.EINVAL
+ }
+ return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil
+}
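
The attribute helpers above are symmetric, which can be checked without opening a socket; a minimal sketch (linux only):

    package main

    import (
        "fmt"
        "log"
        "syscall"

        "github.com/vishvananda/netlink/nl"
    )

    func main() {
        attr := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(1500))
        parsed, err := nl.ParseRouteAttr(attr.Serialize())
        if err != nil {
            log.Fatal(err)
        }
        native := nl.NativeEndian()
        fmt.Println(parsed[0].Attr.Type, native.Uint32(parsed[0].Value)) // 4 1500
    }
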
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/route_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/route_linux.go
new file mode 100644
index 0000000..447e83e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/route_linux.go
@@ -0,0 +1,42 @@
+package nl
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type RtMsg struct {
+ syscall.RtMsg
+}
+
+func NewRtMsg() *RtMsg {
+ return &RtMsg{
+ RtMsg: syscall.RtMsg{
+ Table: syscall.RT_TABLE_MAIN,
+ Scope: syscall.RT_SCOPE_UNIVERSE,
+ Protocol: syscall.RTPROT_BOOT,
+ Type: syscall.RTN_UNICAST,
+ },
+ }
+}
+
+func NewRtDelMsg() *RtMsg {
+ return &RtMsg{
+ RtMsg: syscall.RtMsg{
+ Table: syscall.RT_TABLE_MAIN,
+ Scope: syscall.RT_SCOPE_NOWHERE,
+ },
+ }
+}
+
+func (msg *RtMsg) Len() int {
+ return syscall.SizeofRtMsg
+}
+
+func DeserializeRtMsg(b []byte) *RtMsg {
+ return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0]))
+}
+
+func (msg *RtMsg) Serialize() []byte {
+ return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:]
+}
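
A quick sketch of the defaults the two constructors above bake in (linux only):

    package main

    import (
        "fmt"
        "syscall"

        "github.com/vishvananda/netlink/nl"
    )

    func main() {
        add := nl.NewRtMsg()
        del := nl.NewRtDelMsg()
        fmt.Println(add.Table == syscall.RT_TABLE_MAIN, add.Type == syscall.RTN_UNICAST) // true true
        fmt.Println(del.Scope == syscall.RT_SCOPE_NOWHERE)                               // true
        fmt.Println(len(add.Serialize()))                                                // syscall.SizeofRtMsg == 12
    }
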
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/tc_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/tc_linux.go
new file mode 100644
index 0000000..c9bfe8d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/tc_linux.go
@@ -0,0 +1,359 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+// Message types
+const (
+ TCA_UNSPEC = iota
+ TCA_KIND
+ TCA_OPTIONS
+ TCA_STATS
+ TCA_XSTATS
+ TCA_RATE
+ TCA_FCNT
+ TCA_STATS2
+ TCA_STAB
+ TCA_MAX = TCA_STAB
+)
+
+const (
+ TCA_ACT_TAB = 1
+ TCAA_MAX = 1
+)
+
+const (
+ TCA_PRIO_UNSPEC = iota
+ TCA_PRIO_MQ
+ TCA_PRIO_MAX = TCA_PRIO_MQ
+)
+
+const (
+ SizeofTcMsg = 0x14
+ SizeofTcActionMsg = 0x04
+ SizeofTcPrioMap = 0x14
+ SizeofTcRateSpec = 0x0c
+ SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c
+ SizeofTcU32Key = 0x10
+ SizeofTcU32Sel = 0x10 // without keys
+ SizeofTcMirred = 0x1c
+)
+
+// struct tcmsg {
+// unsigned char tcm_family;
+// unsigned char tcm__pad1;
+// unsigned short tcm__pad2;
+// int tcm_ifindex;
+// __u32 tcm_handle;
+// __u32 tcm_parent;
+// __u32 tcm_info;
+// };
+
+type TcMsg struct {
+ Family uint8
+ Pad [3]byte
+ Ifindex int32
+ Handle uint32
+ Parent uint32
+ Info uint32
+}
+
+func (msg *TcMsg) Len() int {
+ return SizeofTcMsg
+}
+
+func DeserializeTcMsg(b []byte) *TcMsg {
+ return (*TcMsg)(unsafe.Pointer(&b[0:SizeofTcMsg][0]))
+}
+
+func (x *TcMsg) Serialize() []byte {
+ return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tcamsg {
+// unsigned char tca_family;
+// unsigned char tca__pad1;
+// unsigned short tca__pad2;
+// };
+
+type TcActionMsg struct {
+ Family uint8
+ Pad [3]byte
+}
+
+func (msg *TcActionMsg) Len() int {
+ return SizeofTcActionMsg
+}
+
+func DeserializeTcActionMsg(b []byte) *TcActionMsg {
+ return (*TcActionMsg)(unsafe.Pointer(&b[0:SizeofTcActionMsg][0]))
+}
+
+func (x *TcActionMsg) Serialize() []byte {
+ return (*(*[SizeofTcActionMsg]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TC_PRIO_MAX = 15
+)
+
+// struct tc_prio_qopt {
+// int bands; /* Number of bands */
+// __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
+// };
+
+type TcPrioMap struct {
+ Bands int32
+ Priomap [TC_PRIO_MAX + 1]uint8
+}
+
+func (msg *TcPrioMap) Len() int {
+ return SizeofTcPrioMap
+}
+
+func DeserializeTcPrioMap(b []byte) *TcPrioMap {
+ return (*TcPrioMap)(unsafe.Pointer(&b[0:SizeofTcPrioMap][0]))
+}
+
+func (x *TcPrioMap) Serialize() []byte {
+ return (*(*[SizeofTcPrioMap]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TCA_TBF_UNSPEC = iota
+ TCA_TBF_PARMS
+ TCA_TBF_RTAB
+ TCA_TBF_PTAB
+ TCA_TBF_RATE64
+ TCA_TBF_PRATE64
+ TCA_TBF_BURST
+ TCA_TBF_PBURST
+ TCA_TBF_MAX = TCA_TBF_PBURST
+)
+
+// struct tc_ratespec {
+// unsigned char cell_log;
+// __u8 linklayer; /* lower 4 bits */
+// unsigned short overhead;
+// short cell_align;
+// unsigned short mpu;
+// __u32 rate;
+// };
+
+type TcRateSpec struct {
+ CellLog uint8
+ Linklayer uint8
+ Overhead uint16
+ CellAlign int16
+ Mpu uint16
+ Rate uint32
+}
+
+func (msg *TcRateSpec) Len() int {
+ return SizeofTcRateSpec
+}
+
+func DeserializeTcRateSpec(b []byte) *TcRateSpec {
+ return (*TcRateSpec)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0]))
+}
+
+func (x *TcRateSpec) Serialize() []byte {
+ return (*(*[SizeofTcRateSpec]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_tbf_qopt {
+// struct tc_ratespec rate;
+// struct tc_ratespec peakrate;
+// __u32 limit;
+// __u32 buffer;
+// __u32 mtu;
+// };
+
+type TcTbfQopt struct {
+ Rate TcRateSpec
+ Peakrate TcRateSpec
+ Limit uint32
+ Buffer uint32
+ Mtu uint32
+}
+
+func (msg *TcTbfQopt) Len() int {
+ return SizeofTcTbfQopt
+}
+
+func DeserializeTcTbfQopt(b []byte) *TcTbfQopt {
+ return (*TcTbfQopt)(unsafe.Pointer(&b[0:SizeofTcTbfQopt][0]))
+}
+
+func (x *TcTbfQopt) Serialize() []byte {
+ return (*(*[SizeofTcTbfQopt]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TCA_U32_UNSPEC = iota
+ TCA_U32_CLASSID
+ TCA_U32_HASH
+ TCA_U32_LINK
+ TCA_U32_DIVISOR
+ TCA_U32_SEL
+ TCA_U32_POLICE
+ TCA_U32_ACT
+ TCA_U32_INDEV
+ TCA_U32_PCNT
+ TCA_U32_MARK
+ TCA_U32_MAX = TCA_U32_MARK
+)
+
+// struct tc_u32_key {
+// __be32 mask;
+// __be32 val;
+// int off;
+// int offmask;
+// };
+
+type TcU32Key struct {
+ Mask uint32 // big endian
+ Val uint32 // big endian
+ Off int32
+ OffMask int32
+}
+
+func (msg *TcU32Key) Len() int {
+ return SizeofTcU32Key
+}
+
+func DeserializeTcU32Key(b []byte) *TcU32Key {
+ return (*TcU32Key)(unsafe.Pointer(&b[0:SizeofTcU32Key][0]))
+}
+
+func (x *TcU32Key) Serialize() []byte {
+ return (*(*[SizeofTcU32Key]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct tc_u32_sel {
+// unsigned char flags;
+// unsigned char offshift;
+// unsigned char nkeys;
+//
+// __be16 offmask;
+// __u16 off;
+// short offoff;
+//
+// short hoff;
+// __be32 hmask;
+// struct tc_u32_key keys[0];
+// };
+
+const (
+ TC_U32_TERMINAL = 1 << iota
+ TC_U32_OFFSET = 1 << iota
+ TC_U32_VAROFFSET = 1 << iota
+ TC_U32_EAT = 1 << iota
+)
+
+type TcU32Sel struct {
+ Flags uint8
+ Offshift uint8
+ Nkeys uint8
+ Pad uint8
+ Offmask uint16 // big endian
+ Off uint16
+ Offoff int16
+ Hoff int16
+ Hmask uint32 // big endian
+ Keys []TcU32Key
+}
+
+func (msg *TcU32Sel) Len() int {
+ return SizeofTcU32Sel + int(msg.Nkeys)*SizeofTcU32Key
+}
+
+func DeserializeTcU32Sel(b []byte) *TcU32Sel {
+ x := &TcU32Sel{}
+ copy((*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:], b)
+ next := SizeofTcU32Sel
+ var i uint8
+ for i = 0; i < x.Nkeys; i++ {
+ x.Keys = append(x.Keys, *DeserializeTcU32Key(b[next:]))
+ next += SizeofTcU32Key
+ }
+ return x
+}
+
+func (x *TcU32Sel) Serialize() []byte {
+ // This can't just unsafe.cast because it must iterate through keys.
+ buf := make([]byte, x.Len())
+ copy(buf, (*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:])
+ next := SizeofTcU32Sel
+ for _, key := range x.Keys {
+ keyBuf := key.Serialize()
+ copy(buf[next:], keyBuf)
+ next += SizeofTcU32Key
+ }
+ return buf
+}
+
+const (
+ TCA_ACT_MIRRED = 8
+)
+
+const (
+ TCA_MIRRED_UNSPEC = iota
+ TCA_MIRRED_TM
+ TCA_MIRRED_PARMS
+ TCA_MIRRED_MAX = TCA_MIRRED_PARMS
+)
+
+const (
+ TCA_EGRESS_REDIR = 1 /* packet redirect to EGRESS */
+ TCA_EGRESS_MIRROR = 2 /* mirror packet to EGRESS */
+ TCA_INGRESS_REDIR = 3 /* packet redirect to INGRESS */
+ TCA_INGRESS_MIRROR = 4 /* mirror packet to INGRESS */
+)
+
+const (
+ TC_ACT_UNSPEC = int32(-1)
+ TC_ACT_OK = 0
+ TC_ACT_RECLASSIFY = 1
+ TC_ACT_SHOT = 2
+ TC_ACT_PIPE = 3
+ TC_ACT_STOLEN = 4
+ TC_ACT_QUEUED = 5
+ TC_ACT_REPEAT = 6
+ TC_ACT_JUMP = 0x10000000
+)
+
+// #define tc_gen \
+// __u32 index; \
+// __u32 capab; \
+// int action; \
+// int refcnt; \
+// int bindcnt
+// struct tc_mirred {
+// tc_gen;
+// int eaction; /* one of IN/EGRESS_MIRROR/REDIR */
+// __u32 ifindex; /* ifindex of egress port */
+// };
+
+type TcMirred struct {
+ Index uint32
+ Capab uint32
+ Action int32
+ Refcnt int32
+ Bindcnt int32
+ Eaction int32
+ Ifindex uint32
+}
+
+func (msg *TcMirred) Len() int {
+ return SizeofTcMirred
+}
+
+func DeserializeTcMirred(b []byte) *TcMirred {
+ return (*TcMirred)(unsafe.Pointer(&b[0:SizeofTcMirred][0]))
+}
+
+func (x *TcMirred) Serialize() []byte {
+ return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:]
+}
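
TcU32Sel is the one tc structure here whose Serialize cannot be a plain cast because of the trailing key array; a round-trip sketch (linux only; the mask and value are illustrative):

    package main

    import (
        "fmt"

        "github.com/vishvananda/netlink/nl"
    )

    func main() {
        sel := &nl.TcU32Sel{
            Flags: nl.TC_U32_TERMINAL,
            Nkeys: 1,
            Keys:  []nl.TcU32Key{{Mask: 0xffffffff, Val: 0x0a000001}}, // match 10.0.0.1
        }
        wire := sel.Serialize() // 16-byte header followed by one 16-byte key
        back := nl.DeserializeTcU32Sel(wire)
        fmt.Println(len(wire), back.Nkeys, back.Keys[0].Val == 0x0a000001) // 32 1 true
    }
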
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
new file mode 100644
index 0000000..d24637d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
@@ -0,0 +1,258 @@
+package nl
+
+import (
+ "bytes"
+ "net"
+ "unsafe"
+)
+
+// Infinity for packet and byte counts
+const (
+ XFRM_INF = ^uint64(0)
+)
+
+// Message Types
+const (
+ XFRM_MSG_BASE = 0x10
+ XFRM_MSG_NEWSA = 0x10
+ XFRM_MSG_DELSA = 0x11
+ XFRM_MSG_GETSA = 0x12
+ XFRM_MSG_NEWPOLICY = 0x13
+ XFRM_MSG_DELPOLICY = 0x14
+ XFRM_MSG_GETPOLICY = 0x15
+ XFRM_MSG_ALLOCSPI = 0x16
+ XFRM_MSG_ACQUIRE = 0x17
+ XFRM_MSG_EXPIRE = 0x18
+ XFRM_MSG_UPDPOLICY = 0x19
+ XFRM_MSG_UPDSA = 0x1a
+ XFRM_MSG_POLEXPIRE = 0x1b
+ XFRM_MSG_FLUSHSA = 0x1c
+ XFRM_MSG_FLUSHPOLICY = 0x1d
+ XFRM_MSG_NEWAE = 0x1e
+ XFRM_MSG_GETAE = 0x1f
+ XFRM_MSG_REPORT = 0x20
+ XFRM_MSG_MIGRATE = 0x21
+ XFRM_MSG_NEWSADINFO = 0x22
+ XFRM_MSG_GETSADINFO = 0x23
+ XFRM_MSG_NEWSPDINFO = 0x24
+ XFRM_MSG_GETSPDINFO = 0x25
+ XFRM_MSG_MAPPING = 0x26
+ XFRM_MSG_MAX = 0x26
+ XFRM_NR_MSGTYPES = 0x17
+)
+
+// Attribute types
+const (
+ /* Netlink message attributes. */
+ XFRMA_UNSPEC = 0x00
+ XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */
+ XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */
+ XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */
+ XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */
+ XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */
+ XFRMA_SA = 0x06 /* struct xfrm_usersa_info */
+ XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */
+ XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */
+ XFRMA_LTIME_VAL = 0x09
+ XFRMA_REPLAY_VAL = 0x0a
+ XFRMA_REPLAY_THRESH = 0x0b
+ XFRMA_ETIMER_THRESH = 0x0c
+ XFRMA_SRCADDR = 0x0d /* xfrm_address_t */
+ XFRMA_COADDR = 0x0e /* xfrm_address_t */
+ XFRMA_LASTUSED = 0x0f /* unsigned long */
+ XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */
+ XFRMA_MIGRATE = 0x11
+ XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */
+ XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */
+ XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */
+ XFRMA_MARK = 0x15 /* struct xfrm_mark */
+ XFRMA_TFCPAD = 0x16 /* __u32 */
+ XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */
+ XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */
+ XFRMA_MAX = 0x18
+)
+
+const (
+ SizeofXfrmAddress = 0x10
+ SizeofXfrmSelector = 0x38
+ SizeofXfrmLifetimeCfg = 0x40
+ SizeofXfrmLifetimeCur = 0x20
+ SizeofXfrmId = 0x18
+)
+
+// typedef union {
+// __be32 a4;
+// __be32 a6[4];
+// } xfrm_address_t;
+
+type XfrmAddress [SizeofXfrmAddress]byte
+
+func (x *XfrmAddress) ToIP() net.IP {
+ var empty = [12]byte{}
+ ip := make(net.IP, net.IPv6len)
+ if bytes.Equal(x[4:16], empty[:]) {
+ ip[10] = 0xff
+ ip[11] = 0xff
+ copy(ip[12:16], x[0:4])
+ } else {
+ copy(ip[:], x[:])
+ }
+ return ip
+}
+
+func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet {
+ ip := x.ToIP()
+ if GetIPFamily(ip) == FAMILY_V4 {
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)}
+ }
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 128)}
+}
+
+func (x *XfrmAddress) FromIP(ip net.IP) {
+ var empty = [16]byte{}
+ if len(ip) < net.IPv4len {
+ copy(x[4:16], empty[:])
+ } else if GetIPFamily(ip) == FAMILY_V4 {
+ copy(x[0:4], ip.To4()[0:4])
+ copy(x[4:16], empty[:12])
+ } else {
+ copy(x[0:16], ip.To16()[0:16])
+ }
+}
+
+func DeserializeXfrmAddress(b []byte) *XfrmAddress {
+ return (*XfrmAddress)(unsafe.Pointer(&b[0:SizeofXfrmAddress][0]))
+}
+
+func (x *XfrmAddress) Serialize() []byte {
+ return (*(*[SizeofXfrmAddress]byte)(unsafe.Pointer(x)))[:]
+}
+
+// struct xfrm_selector {
+// xfrm_address_t daddr;
+// xfrm_address_t saddr;
+// __be16 dport;
+// __be16 dport_mask;
+// __be16 sport;
+// __be16 sport_mask;
+// __u16 family;
+// __u8 prefixlen_d;
+// __u8 prefixlen_s;
+// __u8 proto;
+// int ifindex;
+// __kernel_uid32_t user;
+// };
+
+type XfrmSelector struct {
+ Daddr XfrmAddress
+ Saddr XfrmAddress
+ Dport uint16 // big endian
+ DportMask uint16 // big endian
+ Sport uint16 // big endian
+ SportMask uint16 // big endian
+ Family uint16
+ PrefixlenD uint8
+ PrefixlenS uint8
+ Proto uint8
+ Pad [3]byte
+ Ifindex int32
+ User uint32
+}
+
+func (msg *XfrmSelector) Len() int {
+ return SizeofXfrmSelector
+}
+
+func DeserializeXfrmSelector(b []byte) *XfrmSelector {
+ return (*XfrmSelector)(unsafe.Pointer(&b[0:SizeofXfrmSelector][0]))
+}
+
+func (msg *XfrmSelector) Serialize() []byte {
+ return (*(*[SizeofXfrmSelector]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cfg {
+// __u64 soft_byte_limit;
+// __u64 hard_byte_limit;
+// __u64 soft_packet_limit;
+// __u64 hard_packet_limit;
+// __u64 soft_add_expires_seconds;
+// __u64 hard_add_expires_seconds;
+// __u64 soft_use_expires_seconds;
+// __u64 hard_use_expires_seconds;
+// };
+//
+
+type XfrmLifetimeCfg struct {
+ SoftByteLimit uint64
+ HardByteLimit uint64
+ SoftPacketLimit uint64
+ HardPacketLimit uint64
+ SoftAddExpiresSeconds uint64
+ HardAddExpiresSeconds uint64
+ SoftUseExpiresSeconds uint64
+ HardUseExpiresSeconds uint64
+}
+
+func (msg *XfrmLifetimeCfg) Len() int {
+ return SizeofXfrmLifetimeCfg
+}
+
+func DeserializeXfrmLifetimeCfg(b []byte) *XfrmLifetimeCfg {
+ return (*XfrmLifetimeCfg)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCfg][0]))
+}
+
+func (msg *XfrmLifetimeCfg) Serialize() []byte {
+ return (*(*[SizeofXfrmLifetimeCfg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cur {
+// __u64 bytes;
+// __u64 packets;
+// __u64 add_time;
+// __u64 use_time;
+// };
+
+type XfrmLifetimeCur struct {
+ Bytes uint64
+ Packets uint64
+ AddTime uint64
+ UseTime uint64
+}
+
+func (msg *XfrmLifetimeCur) Len() int {
+ return SizeofXfrmLifetimeCur
+}
+
+func DeserializeXfrmLifetimeCur(b []byte) *XfrmLifetimeCur {
+ return (*XfrmLifetimeCur)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCur][0]))
+}
+
+func (msg *XfrmLifetimeCur) Serialize() []byte {
+ return (*(*[SizeofXfrmLifetimeCur]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_id {
+// xfrm_address_t daddr;
+// __be32 spi;
+// __u8 proto;
+// };
+
+type XfrmId struct {
+ Daddr XfrmAddress
+ Spi uint32 // big endian
+ Proto uint8
+ Pad [3]byte
+}
+
+func (msg *XfrmId) Len() int {
+ return SizeofXfrmId
+}
+
+func DeserializeXfrmId(b []byte) *XfrmId {
+ return (*XfrmId)(unsafe.Pointer(&b[0:SizeofXfrmId][0]))
+}
+
+func (msg *XfrmId) Serialize() []byte {
+ return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
new file mode 100644
index 0000000..66f7e03
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
@@ -0,0 +1,119 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const (
+ SizeofXfrmUserpolicyId = 0x40
+ SizeofXfrmUserpolicyInfo = 0xa8
+ SizeofXfrmUserTmpl = 0x40
+)
+
+// struct xfrm_userpolicy_id {
+// struct xfrm_selector sel;
+// __u32 index;
+// __u8 dir;
+// };
+//
+
+type XfrmUserpolicyId struct {
+ Sel XfrmSelector
+ Index uint32
+ Dir uint8
+ Pad [3]byte
+}
+
+func (msg *XfrmUserpolicyId) Len() int {
+ return SizeofXfrmUserpolicyId
+}
+
+func DeserializeXfrmUserpolicyId(b []byte) *XfrmUserpolicyId {
+ return (*XfrmUserpolicyId)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyId][0]))
+}
+
+func (msg *XfrmUserpolicyId) Serialize() []byte {
+ return (*(*[SizeofXfrmUserpolicyId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_userpolicy_info {
+// struct xfrm_selector sel;
+// struct xfrm_lifetime_cfg lft;
+// struct xfrm_lifetime_cur curlft;
+// __u32 priority;
+// __u32 index;
+// __u8 dir;
+// __u8 action;
+// #define XFRM_POLICY_ALLOW 0
+// #define XFRM_POLICY_BLOCK 1
+// __u8 flags;
+// #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */
+// /* Automatically expand selector to include matching ICMP payloads. */
+// #define XFRM_POLICY_ICMP 2
+// __u8 share;
+// };
+
+type XfrmUserpolicyInfo struct {
+ Sel XfrmSelector
+ Lft XfrmLifetimeCfg
+ Curlft XfrmLifetimeCur
+ Priority uint32
+ Index uint32
+ Dir uint8
+ Action uint8
+ Flags uint8
+ Share uint8
+ Pad [4]byte
+}
+
+func (msg *XfrmUserpolicyInfo) Len() int {
+ return SizeofXfrmUserpolicyInfo
+}
+
+func DeserializeXfrmUserpolicyInfo(b []byte) *XfrmUserpolicyInfo {
+ return (*XfrmUserpolicyInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyInfo][0]))
+}
+
+func (msg *XfrmUserpolicyInfo) Serialize() []byte {
+ return (*(*[SizeofXfrmUserpolicyInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_user_tmpl {
+// struct xfrm_id id;
+// __u16 family;
+// xfrm_address_t saddr;
+// __u32 reqid;
+// __u8 mode;
+// __u8 share;
+// __u8 optional;
+// __u32 aalgos;
+// __u32 ealgos;
+// __u32 calgos;
+// }
+
+type XfrmUserTmpl struct {
+ XfrmId XfrmId
+ Family uint16
+ Pad1 [2]byte
+ Saddr XfrmAddress
+ Reqid uint32
+ Mode uint8
+ Share uint8
+ Optional uint8
+ Pad2 byte
+ Aalgos uint32
+ Ealgos uint32
+ Calgos uint32
+}
+
+func (msg *XfrmUserTmpl) Len() int {
+ return SizeofXfrmUserTmpl
+}
+
+func DeserializeXfrmUserTmpl(b []byte) *XfrmUserTmpl {
+ return (*XfrmUserTmpl)(unsafe.Pointer(&b[0:SizeofXfrmUserTmpl][0]))
+}
+
+func (msg *XfrmUserTmpl) Serialize() []byte {
+ return (*(*[SizeofXfrmUserTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
new file mode 100644
index 0000000..4876ce4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
@@ -0,0 +1,221 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const (
+ SizeofXfrmUsersaId = 0x18
+ SizeofXfrmStats = 0x0c
+ SizeofXfrmUsersaInfo = 0xe0
+ SizeofXfrmAlgo = 0x44
+ SizeofXfrmAlgoAuth = 0x48
+ SizeofXfrmEncapTmpl = 0x18
+)
+
+// struct xfrm_usersa_id {
+// xfrm_address_t daddr;
+// __be32 spi;
+// __u16 family;
+// __u8 proto;
+// };
+
+type XfrmUsersaId struct {
+ Daddr XfrmAddress
+ Spi uint32 // big endian
+ Family uint16
+ Proto uint8
+ Pad byte
+}
+
+func (msg *XfrmUsersaId) Len() int {
+ return SizeofXfrmUsersaId
+}
+
+func DeserializeXfrmUsersaId(b []byte) *XfrmUsersaId {
+ return (*XfrmUsersaId)(unsafe.Pointer(&b[0:SizeofXfrmUsersaId][0]))
+}
+
+func (msg *XfrmUsersaId) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_stats {
+// __u32 replay_window;
+// __u32 replay;
+// __u32 integrity_failed;
+// };
+
+type XfrmStats struct {
+ ReplayWindow uint32
+ Replay uint32
+ IntegrityFailed uint32
+}
+
+func (msg *XfrmStats) Len() int {
+ return SizeofXfrmStats
+}
+
+func DeserializeXfrmStats(b []byte) *XfrmStats {
+ return (*XfrmStats)(unsafe.Pointer(&b[0:SizeofXfrmStats][0]))
+}
+
+func (msg *XfrmStats) Serialize() []byte {
+ return (*(*[SizeofXfrmStats]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_usersa_info {
+// struct xfrm_selector sel;
+// struct xfrm_id id;
+// xfrm_address_t saddr;
+// struct xfrm_lifetime_cfg lft;
+// struct xfrm_lifetime_cur curlft;
+// struct xfrm_stats stats;
+// __u32 seq;
+// __u32 reqid;
+// __u16 family;
+// __u8 mode; /* XFRM_MODE_xxx */
+// __u8 replay_window;
+// __u8 flags;
+// #define XFRM_STATE_NOECN 1
+// #define XFRM_STATE_DECAP_DSCP 2
+// #define XFRM_STATE_NOPMTUDISC 4
+// #define XFRM_STATE_WILDRECV 8
+// #define XFRM_STATE_ICMP 16
+// #define XFRM_STATE_AF_UNSPEC 32
+// #define XFRM_STATE_ALIGN4 64
+// #define XFRM_STATE_ESN 128
+// };
+//
+// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1
+//
+
+type XfrmUsersaInfo struct {
+ Sel XfrmSelector
+ Id XfrmId
+ Saddr XfrmAddress
+ Lft XfrmLifetimeCfg
+ Curlft XfrmLifetimeCur
+ Stats XfrmStats
+ Seq uint32
+ Reqid uint32
+ Family uint16
+ Mode uint8
+ ReplayWindow uint8
+ Flags uint8
+ Pad [7]byte
+}
+
+func (msg *XfrmUsersaInfo) Len() int {
+ return SizeofXfrmUsersaInfo
+}
+
+func DeserializeXfrmUsersaInfo(b []byte) *XfrmUsersaInfo {
+ return (*XfrmUsersaInfo)(unsafe.Pointer(&b[0:SizeofXfrmUsersaInfo][0]))
+}
+
+func (msg *XfrmUsersaInfo) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_algo {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// char alg_key[0];
+// };
+
+type XfrmAlgo struct {
+ AlgName [64]byte
+ AlgKeyLen uint32
+ AlgKey []byte
+}
+
+func (msg *XfrmAlgo) Len() int {
+ return SizeofXfrmAlgo + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgo(b []byte) *XfrmAlgo {
+ ret := XfrmAlgo{}
+ copy(ret.AlgName[:], b[0:64])
+ ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+ ret.AlgKey = b[68:ret.Len()]
+ return &ret
+}
+
+func (msg *XfrmAlgo) Serialize() []byte {
+ b := make([]byte, msg.Len())
+ copy(b[0:64], msg.AlgName[:])
+ copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+ copy(b[68:msg.Len()], msg.AlgKey[:])
+ return b
+}
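+
+// Worked example (illustrative): AlgKeyLen is expressed in bits, so a
+// 32-byte AES-256 key is stored with AlgKeyLen = 256, and Len() returns
+// SizeofXfrmAlgo + 256/8 = SizeofXfrmAlgo + 32 bytes.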
+
+// struct xfrm_algo_auth {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// unsigned int alg_trunc_len; /* in bits */
+// char alg_key[0];
+// };
+
+type XfrmAlgoAuth struct {
+ AlgName [64]byte
+ AlgKeyLen uint32
+ AlgTruncLen uint32
+ AlgKey []byte
+}
+
+func (msg *XfrmAlgoAuth) Len() int {
+ return SizeofXfrmAlgoAuth + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgoAuth(b []byte) *XfrmAlgoAuth {
+ ret := XfrmAlgoAuth{}
+ copy(ret.AlgName[:], b[0:64])
+ ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+ ret.AlgTruncLen = *(*uint32)(unsafe.Pointer(&b[68]))
+ ret.AlgKey = b[72:ret.Len()]
+ return &ret
+}
+
+func (msg *XfrmAlgoAuth) Serialize() []byte {
+ b := make([]byte, msg.Len())
+ copy(b[0:64], msg.AlgName[:])
+ copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+ copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgTruncLen)))[:])
+ copy(b[72:msg.Len()], msg.AlgKey[:])
+ return b
+}
+
+// struct xfrm_algo_aead {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// unsigned int alg_icv_len; /* in bits */
+// char alg_key[0];
+// }
+
+// struct xfrm_encap_tmpl {
+// __u16 encap_type;
+// __be16 encap_sport;
+// __be16 encap_dport;
+// xfrm_address_t encap_oa;
+// };
+
+type XfrmEncapTmpl struct {
+ EncapType uint16
+ EncapSport uint16 // big endian
+ EncapDport uint16 // big endian
+ Pad [2]byte
+ EncapOa XfrmAddress
+}
+
+func (msg *XfrmEncapTmpl) Len() int {
+ return SizeofXfrmEncapTmpl
+}
+
+func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl {
+ return (*XfrmEncapTmpl)(unsafe.Pointer(&b[0:SizeofXfrmEncapTmpl][0]))
+}
+
+func (msg *XfrmEncapTmpl) Serialize() []byte {
+ return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo.go
new file mode 100644
index 0000000..f39ab8f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo.go
@@ -0,0 +1,53 @@
+package netlink
+
+import (
+ "strings"
+)
+
+// Protinfo represents bridge flags from netlink.
+type Protinfo struct {
+ Hairpin bool
+ Guard bool
+ FastLeave bool
+ RootBlock bool
+ Learning bool
+ Flood bool
+}
+
+// String returns a list of enabled flags
+func (prot *Protinfo) String() string {
+ var boolStrings []string
+ if prot.Hairpin {
+ boolStrings = append(boolStrings, "Hairpin")
+ }
+ if prot.Guard {
+ boolStrings = append(boolStrings, "Guard")
+ }
+ if prot.FastLeave {
+ boolStrings = append(boolStrings, "FastLeave")
+ }
+ if prot.RootBlock {
+ boolStrings = append(boolStrings, "RootBlock")
+ }
+ if prot.Learning {
+ boolStrings = append(boolStrings, "Learning")
+ }
+ if prot.Flood {
+ boolStrings = append(boolStrings, "Flood")
+ }
+ return strings.Join(boolStrings, " ")
+}
+
+func boolToByte(x bool) []byte {
+ if x {
+ return []byte{1}
+ }
+ return []byte{0}
+}
+
+func byteToBool(x byte) bool {
+ return x != 0
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo_linux.go
new file mode 100644
index 0000000..7181eba
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo_linux.go
@@ -0,0 +1,60 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func LinkGetProtinfo(link Link) (Protinfo, error) {
+ base := link.Attrs()
+ ensureIndex(base)
+ var pi Protinfo
+ req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ req.AddData(msg)
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return pi, err
+ }
+
+ for _, m := range msgs {
+ ans := nl.DeserializeIfInfomsg(m)
+ if int(ans.Index) != base.Index {
+ continue
+ }
+ attrs, err := nl.ParseRouteAttr(m[ans.Len():])
+ if err != nil {
+ return pi, err
+ }
+ for _, attr := range attrs {
+ if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED {
+ continue
+ }
+ infos, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return pi, err
+ }
+ for _, info := range infos {
+ switch info.Attr.Type {
+ case nl.IFLA_BRPORT_MODE:
+ pi.Hairpin = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_GUARD:
+ pi.Guard = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_FAST_LEAVE:
+ pi.FastLeave = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_PROTECT:
+ pi.RootBlock = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_LEARNING:
+ pi.Learning = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_UNICAST_FLOOD:
+ pi.Flood = byteToBool(info.Value[0])
+ }
+ }
+ return pi, nil
+ }
+ }
+ return pi, fmt.Errorf("Device with index %d not found", base.Index)
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc.go
new file mode 100644
index 0000000..8e3d020
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc.go
@@ -0,0 +1,138 @@
+package netlink
+
+import (
+ "fmt"
+)
+
+const (
+ HANDLE_NONE = 0
+ HANDLE_INGRESS = 0xFFFFFFF1
+ HANDLE_ROOT = 0xFFFFFFFF
+ PRIORITY_MAP_LEN = 16
+)
+
+type Qdisc interface {
+ Attrs() *QdiscAttrs
+ Type() string
+}
+
+// QdiscAttrs represents the common attributes of a netlink qdisc. A qdisc
+// is associated with a link, has a handle, a parent and a refcnt. The root
+// qdisc of a device should have parent == HANDLE_ROOT.
+type QdiscAttrs struct {
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Refcnt uint32 // read only
+}
+
+func (q QdiscAttrs) String() string {
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %s}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt)
+}
+
+func MakeHandle(major, minor uint16) uint32 {
+ return (uint32(major) << 16) | uint32(minor)
+}
+
+func MajorMinor(handle uint32) (uint16, uint16) {
+ return uint16((handle & 0xFFFF0000) >> 16), uint16(handle & 0x0000FFFF)
+}
+
+func HandleStr(handle uint32) string {
+ switch handle {
+ case HANDLE_NONE:
+ return "none"
+ case HANDLE_INGRESS:
+ return "ingress"
+ case HANDLE_ROOT:
+ return "root"
+ default:
+ major, minor := MajorMinor(handle)
+ return fmt.Sprintf("%x:%x", major, minor)
+ }
+}
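+
+// Worked example (illustrative): a tc handle packs a 16-bit major and minor
+// number into a single uint32:
+//
+//	h := MakeHandle(1, 10)        // 0x0001000a
+//	major, minor := MajorMinor(h) // (1, 10)
+//	s := HandleStr(h)             // "1:a"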
+
+// PfifoFast is the default qdisc created by the kernel if one has not
+// been defined for the interface
+type PfifoFast struct {
+ QdiscAttrs
+ Bands uint8
+ PriorityMap [PRIORITY_MAP_LEN]uint8
+}
+
+func (qdisc *PfifoFast) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *PfifoFast) Type() string {
+ return "pfifo_fast"
+}
+
+// Prio is a basic qdisc that works just like PfifoFast
+type Prio struct {
+ QdiscAttrs
+ Bands uint8
+ PriorityMap [PRIORITY_MAP_LEN]uint8
+}
+
+func NewPrio(attrs QdiscAttrs) *Prio {
+ return &Prio{
+ QdiscAttrs: attrs,
+ Bands: 3,
+ PriorityMap: [PRIORITY_MAP_LEN]uint8{1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
+ }
+}
+
+func (qdisc *Prio) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Prio) Type() string {
+ return "prio"
+}
+
+// Tbf is a classless qdisc that rate limits based on tokens
+type Tbf struct {
+ QdiscAttrs
+ // TODO: handle 64bit rate properly
+ Rate uint64
+ Limit uint32
+ Buffer uint32
+ // TODO: handle other settings
+}
+
+func (qdisc *Tbf) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Tbf) Type() string {
+ return "tbf"
+}
+
+// Ingress is a qdisc for adding ingress filters
+type Ingress struct {
+ QdiscAttrs
+}
+
+func (qdisc *Ingress) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Ingress) Type() string {
+ return "ingress"
+}
+
+// GenericQdisc represents a qdisc type that is not currently understood
+// by this netlink library.
+type GenericQdisc struct {
+ QdiscAttrs
+ QdiscType string
+}
+
+func (qdisc *GenericQdisc) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *GenericQdisc) Type() string {
+ return qdisc.QdiscType
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc_linux.go
new file mode 100644
index 0000000..2531c9d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc_linux.go
@@ -0,0 +1,263 @@
+package netlink
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// QdiscDel will delete a qdisc from the system.
+// Equivalent to: `tc qdisc del $qdisc`
+func QdiscDel(qdisc Qdisc) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELQDISC, syscall.NLM_F_ACK)
+ base := qdisc.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ }
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// QdiscAdd will add a qdisc to the system.
+// Equivalent to: `tc qdisc add $qdisc`
+func QdiscAdd(qdisc Qdisc) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWQDISC, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ base := qdisc.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ }
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type())))
+
+ options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
+ if prio, ok := qdisc.(*Prio); ok {
+ tcmap := nl.TcPrioMap{
+ Bands: int32(prio.Bands),
+ Priomap: prio.PriorityMap,
+ }
+ options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize())
+ } else if tbf, ok := qdisc.(*Tbf); ok {
+ opt := nl.TcTbfQopt{}
+ // TODO: handle rate > uint32
+ opt.Rate.Rate = uint32(tbf.Rate)
+ opt.Limit = tbf.Limit
+ opt.Buffer = tbf.Buffer
+ nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize())
+ } else if _, ok := qdisc.(*Ingress); ok {
+ // ingress filters must use the proper handle
+ if msg.Parent != HANDLE_INGRESS {
+ return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS")
+ }
+ }
+ req.AddData(options)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
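+
+// Usage sketch (illustrative; assumes a Link obtained elsewhere, e.g. via
+// LinkByName, and placeholder rate values):
+//
+//	qdisc := &Tbf{
+//		QdiscAttrs: QdiscAttrs{
+//			LinkIndex: link.Attrs().Index,
+//			Handle:    MakeHandle(1, 0),
+//			Parent:    HANDLE_ROOT,
+//		},
+//		Rate:   128 * 1024, // bytes per second
+//		Limit:  32 * 1024,
+//		Buffer: 16 * 1024,
+//	}
+//	err := QdiscAdd(qdisc)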
+
+// QdiscList gets a list of qdiscs in the system.
+// Equivalent to: `tc qdisc show`.
+// The list can be filtered by link.
+func QdiscList(link Link) ([]Qdisc, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP)
+ index := int32(0)
+ if link != nil {
+ base := link.Attrs()
+ ensureIndex(base)
+ index = int32(base.Index)
+ }
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: index,
+ }
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Qdisc
+ for _, m := range msgs {
+ msg := nl.DeserializeTcMsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ // skip qdiscs from other interfaces
+ if link != nil && msg.Ifindex != index {
+ continue
+ }
+
+ base := QdiscAttrs{
+ LinkIndex: int(msg.Ifindex),
+ Handle: msg.Handle,
+ Parent: msg.Parent,
+ Refcnt: msg.Info,
+ }
+ var qdisc Qdisc
+ qdiscType := ""
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.TCA_KIND:
+ qdiscType = string(attr.Value[:len(attr.Value)-1])
+ switch qdiscType {
+ case "pfifo_fast":
+ qdisc = &PfifoFast{}
+ case "prio":
+ qdisc = &Prio{}
+ case "tbf":
+ qdisc = &Tbf{}
+ case "ingress":
+ qdisc = &Ingress{}
+ default:
+ qdisc = &GenericQdisc{QdiscType: qdiscType}
+ }
+ case nl.TCA_OPTIONS:
+ switch qdiscType {
+ case "pfifo_fast":
+ // pfifo returns TcPrioMap directly without wrapping it in rtattr
+ if err := parsePfifoFastData(qdisc, attr.Value); err != nil {
+ return nil, err
+ }
+ case "prio":
+ // prio returns TcPrioMap directly without wrapping it in rtattr
+ if err := parsePrioData(qdisc, attr.Value); err != nil {
+ return nil, err
+ }
+ case "tbf":
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ if err := parseTbfData(qdisc, data); err != nil {
+ return nil, err
+ }
+ // no options for ingress
+ }
+ }
+ }
+ *qdisc.Attrs() = base
+ res = append(res, qdisc)
+ }
+
+ return res, nil
+}
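+
+// Usage sketch (illustrative): passing a nil link lists qdiscs on every
+// interface:
+//
+//	qdiscs, err := QdiscList(nil)
+//	if err == nil {
+//		for _, q := range qdiscs {
+//			fmt.Println(q.Type(), q.Attrs())
+//		}
+//	}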
+
+func parsePfifoFastData(qdisc Qdisc, value []byte) error {
+ pfifo := qdisc.(*PfifoFast)
+ tcmap := nl.DeserializeTcPrioMap(value)
+ pfifo.PriorityMap = tcmap.Priomap
+ pfifo.Bands = uint8(tcmap.Bands)
+ return nil
+}
+
+func parsePrioData(qdisc Qdisc, value []byte) error {
+ prio := qdisc.(*Prio)
+ tcmap := nl.DeserializeTcPrioMap(value)
+ prio.PriorityMap = tcmap.Priomap
+ prio.Bands = uint8(tcmap.Bands)
+ return nil
+}
+
+func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error {
+ native := nl.NativeEndian()
+ tbf := qdisc.(*Tbf)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.TCA_TBF_PARMS:
+ opt := nl.DeserializeTcTbfQopt(datum.Value)
+ tbf.Rate = uint64(opt.Rate.Rate)
+ tbf.Limit = opt.Limit
+ tbf.Buffer = opt.Buffer
+ case nl.TCA_TBF_RATE64:
+ // TCA_TBF_RATE64 carries a full 64-bit value, so read 8 bytes
+ tbf.Rate = native.Uint64(datum.Value[0:8])
+ }
+ }
+ return nil
+}
+
+const (
+ TIME_UNITS_PER_SEC = 1000000
+)
+
+var (
+ tickInUsec float64 = 0.0
+ clockFactor float64 = 0.0
+)
+
+func initClock() {
+ data, err := ioutil.ReadFile("/proc/net/psched")
+ if err != nil {
+ return
+ }
+ parts := strings.Split(strings.TrimSpace(string(data)), " ")
+ if len(parts) < 3 {
+ return
+ }
+ var vals [3]uint64
+ for i := range vals {
+ val, err := strconv.ParseUint(parts[i], 16, 32)
+ if err != nil {
+ return
+ }
+ vals[i] = val
+ }
+ // compatibility
+ if vals[2] == 1000000000 {
+ vals[0] = vals[1]
+ }
+ clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC
+ tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor
+}
+
+func TickInUsec() float64 {
+ if tickInUsec == 0.0 {
+ initClock()
+ }
+ return tickInUsec
+}
+
+func ClockFactor() float64 {
+ if clockFactor == 0.0 {
+ initClock()
+ }
+ return clockFactor
+}
+
+func time2Tick(time uint32) uint32 {
+ return uint32(float64(time) * TickInUsec())
+}
+
+func tick2Time(tick uint32) uint32 {
+ return uint32(float64(tick) / TickInUsec())
+}
+
+func time2Ktime(time uint32) uint32 {
+ return uint32(float64(time) * ClockFactor())
+}
+
+func ktime2Time(ktime uint32) uint32 {
+ return uint32(float64(ktime) / ClockFactor())
+}
+
+func burst(rate uint64, buffer uint32) uint32 {
+ return uint32(float64(rate) * float64(tick2Time(buffer)) / TIME_UNITS_PER_SEC)
+}
+
+func latency(rate uint64, limit, buffer uint32) float64 {
+ return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer))
+}
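+
+// Worked example (illustrative): with rate = 1,000,000 bytes/s and a buffer
+// worth 10,000 usec, burst(rate, buffer) = 1e6 * 10000 / 1e6 = 10,000 bytes,
+// and with limit = 20,000 bytes the queueing latency is
+// 1e6*(20000/1e6) - 10000 = 10,000 usec.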
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/route.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/route.go
new file mode 100644
index 0000000..6218546
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/route.go
@@ -0,0 +1,35 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "syscall"
+)
+
+// Scope is an enum representing a route scope.
+type Scope uint8
+
+const (
+ SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE
+ SCOPE_SITE Scope = syscall.RT_SCOPE_SITE
+ SCOPE_LINK Scope = syscall.RT_SCOPE_LINK
+ SCOPE_HOST Scope = syscall.RT_SCOPE_HOST
+ SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE
+)
+
+// Route represents a netlink route. A route is associated with a link,
+// has a destination network, an optional source ip, and optional
+// gateway. Advanced route parameters and non-main routing tables are
+// currently not supported.
+type Route struct {
+ LinkIndex int
+ Scope Scope
+ Dst *net.IPNet
+ Src net.IP
+ Gw net.IP
+}
+
+func (r Route) String() string {
+ return fmt.Sprintf("{Ifindex: %d Dst: %s Src: %s Gw: %s}", r.LinkIndex, r.Dst,
+ r.Src, r.Gw)
+}
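+
+// Illustrative sketch (placeholder addresses; idx stands for a valid
+// interface index): a route to a /24 via a gateway would be expressed as
+//
+//	_, dst, _ := net.ParseCIDR("10.0.2.0/24")
+//	route := Route{LinkIndex: idx, Dst: dst, Gw: net.ParseIP("192.168.1.1")}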
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/route_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/route_linux.go
new file mode 100644
index 0000000..9e76d44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/route_linux.go
@@ -0,0 +1,225 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// RtAttr is shared so it is in netlink_linux.go
+
+// RouteAdd will add a route to the system.
+// Equivalent to: `ip route add $route`
+func RouteAdd(route *Route) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return routeHandle(route, req, nl.NewRtMsg())
+}
+
+// RouteDel will delete a route from the system.
+// Equivalent to: `ip route del $route`
+func RouteDel(route *Route) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK)
+ return routeHandle(route, req, nl.NewRtDelMsg())
+}
+
+func routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
+ if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil {
+ return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil")
+ }
+
+ msg.Scope = uint8(route.Scope)
+ family := -1
+ var rtAttrs []*nl.RtAttr
+
+ if route.Dst != nil && route.Dst.IP != nil {
+ dstLen, _ := route.Dst.Mask.Size()
+ msg.Dst_len = uint8(dstLen)
+ dstFamily := nl.GetIPFamily(route.Dst.IP)
+ family = dstFamily
+ var dstData []byte
+ if dstFamily == FAMILY_V4 {
+ dstData = route.Dst.IP.To4()
+ } else {
+ dstData = route.Dst.IP.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData))
+ }
+
+ if route.Src != nil {
+ srcFamily := nl.GetIPFamily(route.Src)
+ if family != -1 && family != srcFamily {
+ return fmt.Errorf("source and destination ip are not the same IP family")
+ }
+ family = srcFamily
+ var srcData []byte
+ if srcFamily == FAMILY_V4 {
+ srcData = route.Src.To4()
+ } else {
+ srcData = route.Src.To16()
+ }
+ // The commonly used src ip for routes is actually PREFSRC
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData))
+ }
+
+ if route.Gw != nil {
+ gwFamily := nl.GetIPFamily(route.Gw)
+ if family != -1 && family != gwFamily {
+ return fmt.Errorf("gateway, source, and destination ip are not the same IP family")
+ }
+ family = gwFamily
+ var gwData []byte
+ if gwFamily == FAMILY_V4 {
+ gwData = route.Gw.To4()
+ } else {
+ gwData = route.Gw.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData))
+ }
+
+ msg.Family = uint8(family)
+
+ req.AddData(msg)
+ for _, attr := range rtAttrs {
+ req.AddData(attr)
+ }
+
+ var (
+ b = make([]byte, 4)
+ native = nl.NativeEndian()
+ )
+ native.PutUint32(b, uint32(route.LinkIndex))
+
+ req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b))
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// RouteList gets a list of routes in the system.
+// Equivalent to: `ip route show`.
+// The list can be filtered by link and ip family.
+func RouteList(link Link, family int) ([]Route, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+ if err != nil {
+ return nil, err
+ }
+
+ index := 0
+ if link != nil {
+ base := link.Attrs()
+ ensureIndex(base)
+ index = base.Index
+ }
+
+ native := nl.NativeEndian()
+ var res []Route
+MsgLoop:
+ for _, m := range msgs {
+ msg := nl.DeserializeRtMsg(m)
+
+ if msg.Flags&syscall.RTM_F_CLONED != 0 {
+ // Ignore cloned routes
+ continue
+ }
+
+ if msg.Table != syscall.RT_TABLE_MAIN {
+ // Ignore non-main tables
+ continue
+ }
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ route := Route{Scope: Scope(msg.Scope)}
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.RTA_GATEWAY:
+ route.Gw = net.IP(attr.Value)
+ case syscall.RTA_PREFSRC:
+ route.Src = net.IP(attr.Value)
+ case syscall.RTA_DST:
+ route.Dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)),
+ }
+ case syscall.RTA_OIF:
+ routeIndex := int(native.Uint32(attr.Value[0:4]))
+ if link != nil && routeIndex != index {
+ // Ignore routes from other interfaces
+ continue MsgLoop
+ }
+ route.LinkIndex = routeIndex
+ }
+ }
+ res = append(res, route)
+ }
+
+ return res, nil
+}
+
+// RouteGet gets a route to a specific destination from the host system.
+// Equivalent to: `ip route get`.
+func RouteGet(destination net.IP) ([]Route, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST)
+ family := nl.GetIPFamily(destination)
+ var destinationData []byte
+ var bitlen uint8
+ if family == FAMILY_V4 {
+ destinationData = destination.To4()
+ bitlen = 32
+ } else {
+ destinationData = destination.To16()
+ bitlen = 128
+ }
+ msg := &nl.RtMsg{}
+ msg.Family = uint8(family)
+ msg.Dst_len = bitlen
+ req.AddData(msg)
+
+ rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData)
+ req.AddData(rtaDst)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+ if err != nil {
+ return nil, err
+ }
+
+ native := nl.NativeEndian()
+ var res []Route
+ for _, m := range msgs {
+ msg := nl.DeserializeRtMsg(m)
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ route := Route{}
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.RTA_GATEWAY:
+ route.Gw = net.IP(attr.Value)
+ case syscall.RTA_PREFSRC:
+ route.Src = net.IP(attr.Value)
+ case syscall.RTA_DST:
+ route.Dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)),
+ }
+ case syscall.RTA_OIF:
+ routeIndex := int(native.Uint32(attr.Value[0:4]))
+ route.LinkIndex = routeIndex
+ }
+ }
+ res = append(res, route)
+ }
+ return res, nil
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm.go
new file mode 100644
index 0000000..621ffb6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm.go
@@ -0,0 +1,64 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+)
+
+// Proto is an enum representing an ipsec protocol.
+type Proto uint8
+
+const (
+ XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING
+ XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP
+ XFRM_PROTO_AH Proto = syscall.IPPROTO_AH
+ XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS
+ XFRM_PROTO_COMP Proto = syscall.IPPROTO_COMP
+ XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW
+)
+
+func (p Proto) String() string {
+ switch p {
+ case XFRM_PROTO_ROUTE2:
+ return "route2"
+ case XFRM_PROTO_ESP:
+ return "esp"
+ case XFRM_PROTO_AH:
+ return "ah"
+ case XFRM_PROTO_HAO:
+ return "hao"
+ case XFRM_PROTO_COMP:
+ return "comp"
+ case XFRM_PROTO_IPSEC_ANY:
+ return "ipsec-any"
+ }
+ return fmt.Sprintf("%d", p)
+}
+
+// Mode is an enum representing an ipsec mode (transport, tunnel, etc.).
+type Mode uint8
+
+const (
+ XFRM_MODE_TRANSPORT Mode = iota
+ XFRM_MODE_TUNNEL
+ XFRM_MODE_ROUTEOPTIMIZATION
+ XFRM_MODE_IN_TRIGGER
+ XFRM_MODE_BEET
+ XFRM_MODE_MAX
+)
+
+func (m Mode) String() string {
+ switch m {
+ case XFRM_MODE_TRANSPORT:
+ return "transport"
+ case XFRM_MODE_TUNNEL:
+ return "tunnel"
+ case XFRM_MODE_ROUTEOPTIMIZATION:
+ return "ro"
+ case XFRM_MODE_IN_TRIGGER:
+ return "in_trigger"
+ case XFRM_MODE_BEET:
+ return "beet"
+ }
+ return fmt.Sprintf("%d", m)
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy.go
new file mode 100644
index 0000000..d85c65d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy.go
@@ -0,0 +1,59 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Dir is an enum representing an ipsec template direction.
+type Dir uint8
+
+const (
+ XFRM_DIR_IN Dir = iota
+ XFRM_DIR_OUT
+ XFRM_DIR_FWD
+ XFRM_SOCKET_IN
+ XFRM_SOCKET_OUT
+ XFRM_SOCKET_FWD
+)
+
+func (d Dir) String() string {
+ switch d {
+ case XFRM_DIR_IN:
+ return "dir in"
+ case XFRM_DIR_OUT:
+ return "dir out"
+ case XFRM_DIR_FWD:
+ return "dir fwd"
+ case XFRM_SOCKET_IN:
+ return "socket in"
+ case XFRM_SOCKET_OUT:
+ return "socket out"
+ case XFRM_SOCKET_FWD:
+ return "socket fwd"
+ }
+ return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
+}
+
+// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
+// policy. These rules are matched with XfrmState to determine encryption
+// and authentication algorithms.
+type XfrmPolicyTmpl struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Reqid int
+}
+
+// XfrmPolicy represents an ipsec policy. It represents the overlay network
+// and has a list of XfrmPolicyTmpls representing the base addresses of
+// the policy.
+type XfrmPolicy struct {
+ Dst *net.IPNet
+ Src *net.IPNet
+ Dir Dir
+ Priority int
+ Index int
+ Tmpls []XfrmPolicyTmpl
+}
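+
+// Illustrative sketch (placeholder addresses): an outbound tunnel-mode ESP
+// policy between two overlay subnets, matched to states via Reqid:
+//
+//	_, src, _ := net.ParseCIDR("10.0.1.0/24")
+//	_, dst, _ := net.ParseCIDR("10.0.2.0/24")
+//	policy := XfrmPolicy{
+//		Src: src,
+//		Dst: dst,
+//		Dir: XFRM_DIR_OUT,
+//		Tmpls: []XfrmPolicyTmpl{{
+//			Src:   net.ParseIP("192.168.1.1"),
+//			Dst:   net.ParseIP("192.168.2.1"),
+//			Proto: XFRM_PROTO_ESP,
+//			Mode:  XFRM_MODE_TUNNEL,
+//			Reqid: 1,
+//		}},
+//	}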
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
new file mode 100644
index 0000000..2daf6dc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -0,0 +1,127 @@
+package netlink
+
+import (
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
+ sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP))
+ sel.Daddr.FromIP(policy.Dst.IP)
+ sel.Saddr.FromIP(policy.Src.IP)
+ prefixlenD, _ := policy.Dst.Mask.Size()
+ sel.PrefixlenD = uint8(prefixlenD)
+ prefixlenS, _ := policy.Src.Mask.Size()
+ sel.PrefixlenS = uint8(prefixlenS)
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWPOLICY, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserpolicyInfo{}
+ selFromPolicy(&msg.Sel, policy)
+ msg.Priority = uint32(policy.Priority)
+ msg.Index = uint32(policy.Index)
+ msg.Dir = uint8(policy.Dir)
+ msg.Lft.SoftByteLimit = nl.XFRM_INF
+ msg.Lft.HardByteLimit = nl.XFRM_INF
+ msg.Lft.SoftPacketLimit = nl.XFRM_INF
+ msg.Lft.HardPacketLimit = nl.XFRM_INF
+ req.AddData(msg)
+
+ tmplData := make([]byte, nl.SizeofXfrmUserTmpl*len(policy.Tmpls))
+ for i, tmpl := range policy.Tmpls {
+ start := i * nl.SizeofXfrmUserTmpl
+ userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl])
+ userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst)
+ userTmpl.Saddr.FromIP(tmpl.Src)
+ userTmpl.XfrmId.Proto = uint8(tmpl.Proto)
+ userTmpl.Mode = uint8(tmpl.Mode)
+ userTmpl.Reqid = uint32(tmpl.Reqid)
+ userTmpl.Aalgos = ^uint32(0)
+ userTmpl.Ealgos = ^uint32(0)
+ userTmpl.Calgos = ^uint32(0)
+ }
+ if len(tmplData) > 0 {
+ tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData)
+ req.AddData(tmpls)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELPOLICY, syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserpolicyId{}
+ selFromPolicy(&msg.Sel, policy)
+ msg.Index = uint32(policy.Index)
+ msg.Dir = uint8(policy.Dir)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP)
+
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []XfrmPolicy
+ for _, m := range msgs {
+ msg := nl.DeserializeXfrmUserpolicyInfo(m)
+
+ if family != FAMILY_ALL && family != int(msg.Sel.Family) {
+ continue
+ }
+
+ var policy XfrmPolicy
+
+ policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD)
+ policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS)
+ policy.Priority = int(msg.Priority)
+ policy.Index = int(msg.Index)
+ policy.Dir = Dir(msg.Dir)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_TMPL:
+ max := len(attr.Value)
+ for i := 0; i < max; i += nl.SizeofXfrmUserTmpl {
+ var resTmpl XfrmPolicyTmpl
+ tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl])
+ resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP()
+ resTmpl.Src = tmpl.Saddr.ToIP()
+ resTmpl.Proto = Proto(tmpl.XfrmId.Proto)
+ resTmpl.Mode = Mode(tmpl.Mode)
+ resTmpl.Reqid = int(tmpl.Reqid)
+ policy.Tmpls = append(policy.Tmpls, resTmpl)
+ }
+ }
+ }
+ res = append(res, policy)
+ }
+ return res, nil
+}
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state.go
new file mode 100644
index 0000000..5b8f2df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state.go
@@ -0,0 +1,53 @@
+package netlink
+
+import (
+ "net"
+)
+
+// XfrmStateAlgo represents an encryption or authentication algorithm for an
+// ipsec state.
+type XfrmStateAlgo struct {
+ Name string
+ Key []byte
+ TruncateLen int // Auth only
+}
+
+// EncapType is an enum representing an ipsec encapsulation type.
+type EncapType uint8
+
+const (
+ XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1
+ XFRM_ENCAP_ESPINUDP
+)
+
+func (e EncapType) String() string {
+ switch e {
+ case XFRM_ENCAP_ESPINUDP_NONIKE:
+ return "espinudp-nonike"
+ case XFRM_ENCAP_ESPINUDP:
+ return "espinudp"
+ }
+ return "unknown"
+}
+
+// XfrmStateEncap represents the encapsulation to use for the ipsec encryption.
+type XfrmStateEncap struct {
+ Type EncapType
+ SrcPort int
+ DstPort int
+ OriginalAddress net.IP
+}
+
+// XfrmState represents the state of an ipsec policy. It optionally
+// contains an XfrmStateAlgo for encryption and one for authentication.
+type XfrmState struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Spi int
+ Reqid int
+ ReplayWindow int
+ Auth *XfrmStateAlgo
+ Crypt *XfrmStateAlgo
+ Encap *XfrmStateEncap
+}
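+
+// Illustrative sketch (placeholder addresses and key material): a state that
+// would pair with an ESP tunnel-mode policy template sharing Reqid 1:
+//
+//	state := XfrmState{
+//		Src:   net.ParseIP("192.168.1.1"),
+//		Dst:   net.ParseIP("192.168.2.1"),
+//		Proto: XFRM_PROTO_ESP,
+//		Mode:  XFRM_MODE_TUNNEL,
+//		Spi:   0xabcd,
+//		Reqid: 1,
+//		Auth:  &XfrmStateAlgo{Name: "hmac(sha256)", Key: authKey, TruncateLen: 128},
+//		Crypt: &XfrmStateAlgo{Name: "cbc(aes)", Key: cryptKey},
+//	}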
diff --git a/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state_linux.go b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state_linux.go
new file mode 100644
index 0000000..5f44ec8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -0,0 +1,181 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func writeStateAlgo(a *XfrmStateAlgo) []byte {
+ algo := nl.XfrmAlgo{
+ AlgKeyLen: uint32(len(a.Key) * 8),
+ AlgKey: a.Key,
+ }
+ end := len(a.Name)
+ if end > 64 {
+ end = 64
+ }
+ copy(algo.AlgName[:end], a.Name)
+ return algo.Serialize()
+}
+
+func writeStateAlgoAuth(a *XfrmStateAlgo) []byte {
+ algo := nl.XfrmAlgoAuth{
+ AlgKeyLen: uint32(len(a.Key) * 8),
+ AlgTruncLen: uint32(a.TruncateLen),
+ AlgKey: a.Key,
+ }
+ end := len(a.Name)
+ if end > 64 {
+ end = 64
+ }
+ copy(algo.AlgName[:end], a.Name)
+ return algo.Serialize()
+}
+
+// XfrmStateAdd will add an xfrm state to the system.
+// Equivalent to: `ip xfrm state add $state`
+func XfrmStateAdd(state *XfrmState) error {
+ // A state with spi 0 can't be deleted so don't allow it to be set
+ if state.Spi == 0 {
+ return fmt.Errorf("Spi must be set when adding xfrm state.")
+ }
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWSA, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUsersaInfo{}
+ msg.Family = uint16(nl.GetIPFamily(state.Dst))
+ msg.Id.Daddr.FromIP(state.Dst)
+ msg.Saddr.FromIP(state.Src)
+ msg.Id.Proto = uint8(state.Proto)
+ msg.Mode = uint8(state.Mode)
+ msg.Id.Spi = nl.Swap32(uint32(state.Spi))
+ msg.Reqid = uint32(state.Reqid)
+ msg.ReplayWindow = uint8(state.ReplayWindow)
+ msg.Lft.SoftByteLimit = nl.XFRM_INF
+ msg.Lft.HardByteLimit = nl.XFRM_INF
+ msg.Lft.SoftPacketLimit = nl.XFRM_INF
+ msg.Lft.HardPacketLimit = nl.XFRM_INF
+ req.AddData(msg)
+
+ if state.Auth != nil {
+ out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth))
+ req.AddData(out)
+ }
+ if state.Crypt != nil {
+ out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt))
+ req.AddData(out)
+ }
+ if state.Encap != nil {
+ encapData := make([]byte, nl.SizeofXfrmEncapTmpl)
+ encap := nl.DeserializeXfrmEncapTmpl(encapData)
+ encap.EncapType = uint16(state.Encap.Type)
+ encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort))
+ encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort))
+ encap.EncapOa.FromIP(state.Encap.OriginalAddress)
+ out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData)
+ req.AddData(out)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
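+
+// Note (added commentary): the kernel expects the SPI and the NAT-T ports in
+// network byte order, hence the Swap32/Swap16 calls above; XfrmStateList
+// performs the matching swaps when reading states back.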
+
+// XfrmStateDel will delete an xfrm state from the system. Note that
+// the Algos are ignored when matching the state to delete.
+// Equivalent to: `ip xfrm state del $state`
+func XfrmStateDel(state *XfrmState) error {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELSA, syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUsersaId{}
+ msg.Daddr.FromIP(state.Dst)
+ msg.Family = uint16(nl.GetIPFamily(state.Dst))
+ msg.Proto = uint8(state.Proto)
+ msg.Spi = nl.Swap32(uint32(state.Spi))
+ req.AddData(msg)
+
+ saddr := nl.XfrmAddress{}
+ saddr.FromIP(state.Src)
+ srcdata := nl.NewRtAttr(nl.XFRMA_SRCADDR, saddr.Serialize())
+
+ req.AddData(srcdata)
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmStateList gets a list of xfrm states in the system.
+// Equivalent to: `ip xfrm state show`.
+// The list can be filtered by ip family.
+func XfrmStateList(family int) ([]XfrmState, error) {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
+
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []XfrmState
+ for _, m := range msgs {
+ msg := nl.DeserializeXfrmUsersaInfo(m)
+
+ if family != FAMILY_ALL && family != int(msg.Family) {
+ continue
+ }
+
+ var state XfrmState
+
+ state.Dst = msg.Id.Daddr.ToIP()
+ state.Src = msg.Saddr.ToIP()
+ state.Proto = Proto(msg.Id.Proto)
+ state.Mode = Mode(msg.Mode)
+ state.Spi = int(nl.Swap32(msg.Id.Spi))
+ state.Reqid = int(msg.Reqid)
+ state.ReplayWindow = int(msg.ReplayWindow)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
+ var resAlgo *XfrmStateAlgo
+ if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
+ if state.Auth == nil {
+ state.Auth = new(XfrmStateAlgo)
+ }
+ resAlgo = state.Auth
+ } else {
+ state.Crypt = new(XfrmStateAlgo)
+ resAlgo = state.Crypt
+ }
+ algo := nl.DeserializeXfrmAlgo(attr.Value[:])
+ resAlgo.Name = nl.BytesToString(algo.AlgName[:])
+ resAlgo.Key = algo.AlgKey
+ case nl.XFRMA_ALG_AUTH_TRUNC:
+ if state.Auth == nil {
+ state.Auth = new(XfrmStateAlgo)
+ }
+ algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:])
+ state.Auth.Name = nl.BytesToString(algo.AlgName[:])
+ state.Auth.Key = algo.AlgKey
+ state.Auth.TruncateLen = int(algo.AlgTruncLen)
+ case nl.XFRMA_ENCAP:
+ encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
+ state.Encap = new(XfrmStateEncap)
+ state.Encap.Type = EncapType(encap.EncapType)
+ state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport))
+ state.Encap.DstPort = int(nl.Swap16(encap.EncapDport))
+ state.Encap.OriginalAddress = encap.EncapOa.ToIP()
+ }
+ }
+ res = append(res, state)
+ }
+ return res, nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/exp/LICENSE b/Godeps/_workspace/src/golang.org/x/exp/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/golang.org/x/exp/PATENTS b/Godeps/_workspace/src/golang.org/x/exp/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/Godeps/_workspace/src/golang.org/x/exp/inotify/inotify_linux.go b/Godeps/_workspace/src/golang.org/x/exp/inotify/inotify_linux.go
new file mode 100644
index 0000000..41ac558
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/exp/inotify/inotify_linux.go
@@ -0,0 +1,300 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package inotify implements a wrapper for the Linux inotify system.
+
+Example:
+ watcher, err := inotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = watcher.Watch("/tmp")
+ if err != nil {
+ log.Fatal(err)
+ }
+ for {
+ select {
+ case ev := <-watcher.Event:
+ log.Println("event:", ev)
+ case err := <-watcher.Error:
+ log.Println("error:", err)
+ }
+ }
+
+*/
+package inotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+type Event struct {
+ Mask uint32 // Mask of events
+ Cookie uint32 // Unique cookie associating related events (for rename(2))
+ Name string // File name (optional)
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+type Watcher struct {
+ mu sync.Mutex
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ Error chan error // Errors are sent on this channel
+ Event chan *Event // Events are returned on this channel
+ done chan bool // Channel for sending a "quit message" to the reader goroutine
+ isClosed bool // Set to true when Close() is first called
+}
+
+// NewWatcher creates and returns a new inotify instance using inotify_init(2)
+func NewWatcher() (*Watcher, error) {
+ fd, errno := syscall.InotifyInit()
+ if fd == -1 {
+ return nil, os.NewSyscallError("inotify_init", errno)
+ }
+ w := &Watcher{
+ fd: fd,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Event: make(chan *Event),
+ Error: make(chan error),
+ done: make(chan bool, 1),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close closes an inotify watcher instance
+// It sends a message to the reader goroutine to quit and removes all watches
+// associated with the inotify instance
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ w.done <- true
+ for path := range w.watches {
+ w.RemoveWatch(path)
+ }
+
+ return nil
+}
+
+// AddWatch adds path to the watched file set.
+// The flags are interpreted as described in inotify_add_watch(2).
+func (w *Watcher) AddWatch(path string, flags uint32) error {
+ if w.isClosed {
+ return errors.New("inotify instance already closed")
+ }
+
+ watchEntry, found := w.watches[path]
+ if found {
+ watchEntry.flags |= flags
+ flags |= syscall.IN_MASK_ADD
+ }
+
+ w.mu.Lock() // synchronize with readEvents goroutine
+
+ wd, err := syscall.InotifyAddWatch(w.fd, path, flags)
+ if err != nil {
+ w.mu.Unlock()
+ return &os.PathError{
+ Op: "inotify_add_watch",
+ Path: path,
+ Err: err,
+ }
+ }
+
+ if !found {
+ w.watches[path] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = path
+ }
+ w.mu.Unlock()
+ return nil
+}
+
+// Watch adds path to the watched file set, watching all events.
+func (w *Watcher) Watch(path string) error {
+ return w.AddWatch(path, IN_ALL_EVENTS)
+}
+
+// RemoveWatch removes path from the watched file set.
+func (w *Watcher) RemoveWatch(path string) error {
+ watch, ok := w.watches[path]
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", path)
+ }
+ success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ return os.NewSyscallError("inotify_rm_watch", errno)
+ }
+ delete(w.watches, path)
+ return nil
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Event channel
+func (w *Watcher) readEvents() {
+ var buf [syscall.SizeofInotifyEvent * 4096]byte
+
+ for {
+ n, err := syscall.Read(w.fd, buf[:])
+ // See if there is a message on the "done" channel
+ var done bool
+ select {
+ case done = <-w.done:
+ default:
+ }
+
+ // If EOF or a "done" message is received
+ if n == 0 || done {
+ // The syscall.Close can be slow. Close
+ // w.Event first.
+ close(w.Event)
+ err := syscall.Close(w.fd)
+ if err != nil {
+ w.Error <- os.NewSyscallError("close", err)
+ }
+ close(w.Error)
+ return
+ }
+ if n < 0 {
+ w.Error <- os.NewSyscallError("read", err)
+ continue
+ }
+ if n < syscall.SizeofInotifyEvent {
+ w.Error <- errors.New("inotify: short read in readEvents()")
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-syscall.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+ event := new(Event)
+ event.Mask = uint32(raw.Mask)
+ event.Cookie = uint32(raw.Cookie)
+ nameLen := uint32(raw.Len)
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill
+ // the "Name" field with a valid filename. We retrieve the path of the watch
+ // from the "paths" map.
+ w.mu.Lock()
+ event.Name = w.paths[int(raw.Wd)]
+ w.mu.Unlock()
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
+ // The filename is padded with NUL bytes. TrimRight() gets rid of those.
+ event.Name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+ // Send the event on the events channel
+ w.Event <- event
+
+ // Move to the next event in the buffer
+ offset += syscall.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// String formats the event e in the form
+// "filename: 0xEventMask = IN_ACCESS|IN_ATTRIB_|..."
+func (e *Event) String() string {
+ var events string = ""
+
+ m := e.Mask
+ for _, b := range eventBits {
+ if m&b.Value == b.Value {
+ m &^= b.Value
+ events += "|" + b.Name
+ }
+ }
+
+ if m != 0 {
+ events += fmt.Sprintf("|%#x", m)
+ }
+ if len(events) > 0 {
+ events = " == " + events[1:]
+ }
+
+ return fmt.Sprintf("%q: %#x%s", e.Name, e.Mask, events)
+}
+
+const (
+ // Options for inotify_init() are not exported
+ // IN_CLOEXEC uint32 = syscall.IN_CLOEXEC
+ // IN_NONBLOCK uint32 = syscall.IN_NONBLOCK
+
+ // Options for AddWatch
+ IN_DONT_FOLLOW uint32 = syscall.IN_DONT_FOLLOW
+ IN_ONESHOT uint32 = syscall.IN_ONESHOT
+ IN_ONLYDIR uint32 = syscall.IN_ONLYDIR
+
+ // The "IN_MASK_ADD" option is not exported, as AddWatch
+ // adds it automatically, if there is already a watch for the given path
+ // IN_MASK_ADD uint32 = syscall.IN_MASK_ADD
+
+ // Events
+ IN_ACCESS uint32 = syscall.IN_ACCESS
+ IN_ALL_EVENTS uint32 = syscall.IN_ALL_EVENTS
+ IN_ATTRIB uint32 = syscall.IN_ATTRIB
+ IN_CLOSE uint32 = syscall.IN_CLOSE
+ IN_CLOSE_NOWRITE uint32 = syscall.IN_CLOSE_NOWRITE
+ IN_CLOSE_WRITE uint32 = syscall.IN_CLOSE_WRITE
+ IN_CREATE uint32 = syscall.IN_CREATE
+ IN_DELETE uint32 = syscall.IN_DELETE
+ IN_DELETE_SELF uint32 = syscall.IN_DELETE_SELF
+ IN_MODIFY uint32 = syscall.IN_MODIFY
+ IN_MOVE uint32 = syscall.IN_MOVE
+ IN_MOVED_FROM uint32 = syscall.IN_MOVED_FROM
+ IN_MOVED_TO uint32 = syscall.IN_MOVED_TO
+ IN_MOVE_SELF uint32 = syscall.IN_MOVE_SELF
+ IN_OPEN uint32 = syscall.IN_OPEN
+
+ // Special events
+ IN_ISDIR uint32 = syscall.IN_ISDIR
+ IN_IGNORED uint32 = syscall.IN_IGNORED
+ IN_Q_OVERFLOW uint32 = syscall.IN_Q_OVERFLOW
+ IN_UNMOUNT uint32 = syscall.IN_UNMOUNT
+)
+
+var eventBits = []struct {
+ Value uint32
+ Name string
+}{
+ {IN_ACCESS, "IN_ACCESS"},
+ {IN_ATTRIB, "IN_ATTRIB"},
+ {IN_CLOSE, "IN_CLOSE"},
+ {IN_CLOSE_NOWRITE, "IN_CLOSE_NOWRITE"},
+ {IN_CLOSE_WRITE, "IN_CLOSE_WRITE"},
+ {IN_CREATE, "IN_CREATE"},
+ {IN_DELETE, "IN_DELETE"},
+ {IN_DELETE_SELF, "IN_DELETE_SELF"},
+ {IN_MODIFY, "IN_MODIFY"},
+ {IN_MOVE, "IN_MOVE"},
+ {IN_MOVED_FROM, "IN_MOVED_FROM"},
+ {IN_MOVED_TO, "IN_MOVED_TO"},
+ {IN_MOVE_SELF, "IN_MOVE_SELF"},
+ {IN_OPEN, "IN_OPEN"},
+ {IN_ISDIR, "IN_ISDIR"},
+ {IN_IGNORED, "IN_IGNORED"},
+ {IN_Q_OVERFLOW, "IN_Q_OVERFLOW"},
+ {IN_UNMOUNT, "IN_UNMOUNT"},
+}
diff --git a/Godeps/_workspace/src/golang.org/x/net/LICENSE b/Godeps/_workspace/src/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/golang.org/x/net/PATENTS b/Godeps/_workspace/src/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go
new file mode 100644
index 0000000..11bd8d3
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/net/context/context.go
@@ -0,0 +1,447 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out <-chan Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, &c)
+ return &c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) cancelCtx {
+ return cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return &c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
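
A short usage sketch of the machinery defined above: WithTimeout derives a context whose Done channel is closed once the timeout elapses, after which Err reports DeadlineExceeded. Everything used here is defined in this file.

~~~~go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// Derive a context that is canceled automatically after 50ms.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel() // stops the timer if we return before the deadline

	select {
	case <-time.After(time.Second):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // "context deadline exceeded"
	}
}
~~~~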
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 0000000..a035125
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/golang.org/x"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+ - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+ - go test -v golang.org/x/oauth2/...
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 0000000..88dff59
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
new file mode 100644
index 0000000..d02f24f
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/README.md b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000..a5afeca
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation of the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+	import (
+		"net/http"
+
+		"golang.org/x/net/context"
+		"golang.org/x/oauth2"
+		"golang.org/x/oauth2/google"
+		newappengine "google.golang.org/appengine"
+		newurlfetch "google.golang.org/appengine/urlfetch"
+
+		"appengine"
+	)
+
+ func handler(w http.ResponseWriter, r *http.Request) {
+ var c appengine.Context = appengine.NewContext(r)
+ c.Infof("Logging a message with the old package")
+
+ var ctx context.Context = newappengine.NewContext(r)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "scope"),
+ Base: &newurlfetch.Transport{Context: ctx},
+ },
+ }
+ client.Get("...")
+ }
+
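
Beyond the App Engine notes in the README, the basic three-legged flow with this package looks roughly like the sketch below. The client ID, secret, scope, and the way the authorization code is read back are placeholders; `Config`, `NoContext`, `AccessTypeOffline`, and `github.Endpoint` appear elsewhere in this diff, while `AuthCodeURL`, `Exchange`, and `Client` are assumed from the package's documented API.

~~~~go
package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/github"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "YOUR_CLIENT_ID",     // placeholder
		ClientSecret: "YOUR_CLIENT_SECRET", // placeholder
		Scopes:       []string{"repo"},     // illustrative scope
		Endpoint:     github.Endpoint,
	}

	// Send the user here to approve access.
	fmt.Println("Visit:", conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))

	// The provider redirects back with ?code=...; read it however your
	// handler receives it (stdin here, purely for illustration).
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		log.Fatal(err)
	}
	tok, err := conf.Exchange(oauth2.NoContext, code)
	if err != nil {
		log.Fatal(err)
	}

	// The returned client attaches and refreshes the token automatically.
	client := conf.Client(oauth2.NoContext, tok)
	client.Get("https://api.github.com/user")
}
~~~~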
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 0000000..10aaf91
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,24 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+ registerContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+ return urlfetch.Client(ctx), nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go
new file mode 100644
index 0000000..9c816ff
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package facebook provides constants for using OAuth2 to access Facebook.
+package facebook
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Facebook's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.facebook.com/dialog/oauth",
+ TokenURL: "https://graph.facebook.com/oauth/access_token",
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
new file mode 100644
index 0000000..82ca623
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
@@ -0,0 +1,16 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package github provides constants for using OAuth2 to access Github.
+package github
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Github's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://github.com/login/oauth/authorize",
+ TokenURL: "https://github.com/login/oauth/access_token",
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 0000000..65dc347
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,83 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+)
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+ scopes := append([]string{}, scope...)
+ sort.Strings(scopes)
+ return &appEngineTokenSource{
+ ctx: ctx,
+ scopes: scopes,
+ key: strings.Join(scopes, " "),
+ }
+}
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
+var (
+ aeTokensMu sync.Mutex
+ aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+ mu sync.Mutex // guards t; held while fetching or updating t
+ t *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+ ctx context.Context
+ scopes []string
+ key string // to aeTokens map; space-separated scopes
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+ if appengineTokenFunc == nil {
+ panic("google: AppEngineTokenSource can only be used on App Engine.")
+ }
+
+ aeTokensMu.Lock()
+ tok, ok := aeTokens[ts.key]
+ if !ok {
+ tok = &tokenLock{}
+ aeTokens[ts.key] = tok
+ }
+ aeTokensMu.Unlock()
+
+ tok.mu.Lock()
+ defer tok.mu.Unlock()
+ if tok.t.Valid() {
+ return tok.t, nil
+ }
+ access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+ if err != nil {
+ return nil, err
+ }
+ tok.t = &oauth2.Token{
+ AccessToken: access,
+ Expiry: exp,
+ }
+ return tok.t, nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
new file mode 100644
index 0000000..2f9b154
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+ appengineTokenFunc = appengine.AccessToken
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go
new file mode 100644
index 0000000..817bfb7
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go
@@ -0,0 +1,154 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
+//
+// This client should be used when developing services
+// that run on Google App Engine or Google Compute Engine
+// and use "Application Default Credentials."
+//
+// For more details, see:
+// https://developers.google.com/accounts/application-default-credentials
+//
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
+ ts, err := DefaultTokenSource(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return oauth2.NewClient(ctx, ts), nil
+}
+
+// DefaultTokenSource is a token source that uses
+// "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+// 1. A JSON file whose path is specified by the
+// GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// 2. A JSON file in a location known to the gcloud command-line tool.
+// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// 3. On Google App Engine it uses the appengine.AccessToken function.
+// 4. On Google Compute Engine, it fetches credentials from the metadata server.
+// (In this final case any provided scopes are ignored.)
+//
+// For more details, see:
+// https://developers.google.com/accounts/application-default-credentials
+//
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+ // First, try the environment variable.
+ const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
+ if filename := os.Getenv(envVar); filename != "" {
+ ts, err := tokenSourceFromFile(ctx, filename, scope)
+ if err != nil {
+ return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
+ }
+ return ts, nil
+ }
+
+ // Second, try a well-known file.
+ filename := wellKnownFile()
+ _, err := os.Stat(filename)
+ if err == nil {
+ ts, err2 := tokenSourceFromFile(ctx, filename, scope)
+ if err2 == nil {
+ return ts, nil
+ }
+ err = err2
+ } else if os.IsNotExist(err) {
+ err = nil // ignore this error
+ }
+ if err != nil {
+ return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
+ }
+
+ // Third, if we're on Google App Engine use those credentials.
+ if appengineTokenFunc != nil {
+ return AppEngineTokenSource(ctx, scope...), nil
+ }
+
+ // Fourth, if we're on Google Compute Engine use the metadata server.
+ if metadata.OnGCE() {
+ return ComputeTokenSource(""), nil
+ }
+
+	// No credentials were found; return a helpful error.
+ const url = "https://developers.google.com/accounts/application-default-credentials"
+ return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+}
+
+func wellKnownFile() string {
+ const f = "application_default_credentials.json"
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+ }
+ return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ var d struct {
+ // Common fields
+ Type string
+ ClientID string `json:"client_id"`
+
+ // User Credential fields
+ ClientSecret string `json:"client_secret"`
+ RefreshToken string `json:"refresh_token"`
+
+ // Service Account fields
+ ClientEmail string `json:"client_email"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ }
+ if err := json.Unmarshal(b, &d); err != nil {
+ return nil, err
+ }
+ switch d.Type {
+ case "authorized_user":
+ cfg := &oauth2.Config{
+ ClientID: d.ClientID,
+ ClientSecret: d.ClientSecret,
+ Scopes: append([]string{}, scopes...), // copy
+ Endpoint: Endpoint,
+ }
+ tok := &oauth2.Token{RefreshToken: d.RefreshToken}
+ return cfg.TokenSource(ctx, tok), nil
+ case "service_account":
+ cfg := &jwt.Config{
+ Email: d.ClientEmail,
+ PrivateKey: []byte(d.PrivateKey),
+ Scopes: append([]string{}, scopes...), // copy
+ TokenURL: JWTTokenURL,
+ }
+ return cfg.TokenSource(ctx), nil
+ case "":
+ return nil, errors.New("missing 'type' field in credentials")
+ default:
+ return nil, fmt.Errorf("unknown credential type: %q", d.Type)
+ }
+}
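
A sketch of consuming Application Default Credentials via DefaultClient; the scope and request URL are illustrative placeholders.

~~~~go
package main

import (
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Resolves credentials in the order documented above: env var,
	// well-known file, App Engine, then the GCE metadata server.
	client, err := google.DefaultClient(oauth2.NoContext,
		"https://www.googleapis.com/auth/devstorage.read_only") // illustrative scope
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
~~~~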
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
new file mode 100644
index 0000000..a562185
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/application-default-credentials.
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from https://console.developers.google.com,
+// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
+// JSON format and provide the contents of the file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+ type cred struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RedirectURIs []string `json:"redirect_uris"`
+ AuthURI string `json:"auth_uri"`
+ TokenURI string `json:"token_uri"`
+ }
+ var j struct {
+ Web *cred `json:"web"`
+ Installed *cred `json:"installed"`
+ }
+ if err := json.Unmarshal(jsonKey, &j); err != nil {
+ return nil, err
+ }
+ var c *cred
+ switch {
+ case j.Web != nil:
+ c = j.Web
+ case j.Installed != nil:
+ c = j.Installed
+ default:
+ return nil, fmt.Errorf("oauth2/google: no credentials found")
+ }
+ if len(c.RedirectURIs) < 1 {
+ return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+ }
+ return &oauth2.Config{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RedirectURL: c.RedirectURIs[0],
+ Scopes: scope,
+ Endpoint: oauth2.Endpoint{
+ AuthURL: c.AuthURI,
+ TokenURL: c.TokenURI,
+ },
+ }, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" page under "APIs & Auth" for your
+// project at https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+ var key struct {
+ Email string `json:"client_email"`
+ PrivateKey string `json:"private_key"`
+ }
+ if err := json.Unmarshal(jsonKey, &key); err != nil {
+ return nil, err
+ }
+ return &jwt.Config{
+ Email: key.Email,
+ PrivateKey: []byte(key.PrivateKey),
+ Scopes: scope,
+ TokenURL: JWTTokenURL,
+ }, nil
+}
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+ account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+ if !metadata.OnGCE() {
+ return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+ }
+ acct := cs.account
+ if acct == "" {
+ acct = "default"
+ }
+ tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+ if err != nil {
+ return nil, err
+ }
+ var res struct {
+ AccessToken string `json:"access_token"`
+ ExpiresInSec int `json:"expires_in"`
+ TokenType string `json:"token_type"`
+ }
+ err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+ }
+ if res.ExpiresInSec == 0 || res.AccessToken == "" {
+ return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+ }
+ return &oauth2.Token{
+ AccessToken: res.AccessToken,
+ TokenType: res.TokenType,
+ Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+ }, nil
+}
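
For example, loading a downloaded credentials file into a Config could look like this sketch; the file name and scope are placeholders.

~~~~go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	b, err := ioutil.ReadFile("client_credentials.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/drive") // illustrative scope
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Visit:", conf.AuthCodeURL("state"))
}
~~~~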
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go
new file mode 100644
index 0000000..01ba0ec
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+)
+
+type sdkCredentials struct {
+ Data []struct {
+ Credential struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+ TokenExpiry *time.Time `json:"token_expiry"`
+ } `json:"credential"`
+ Key struct {
+ Account string `json:"account"`
+ Scope string `json:"scope"`
+ } `json:"key"`
+ }
+}
+
+// An SDKConfig provides access to tokens from an account already
+// authorized via the Google Cloud SDK.
+type SDKConfig struct {
+ conf oauth2.Config
+ initialToken *oauth2.Token
+}
+
+// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
+// account. If account is empty, the account currently active in
+// Google Cloud SDK properties is used.
+// Google Cloud SDK credentials must be created by running `gcloud auth`
+// before using this function.
+// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
+func NewSDKConfig(account string) (*SDKConfig, error) {
+ configPath, err := sdkConfigPath()
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+ }
+ credentialsPath := filepath.Join(configPath, "credentials")
+ f, err := os.Open(credentialsPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+ }
+ defer f.Close()
+
+ var c sdkCredentials
+ if err := json.NewDecoder(f).Decode(&c); err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+ }
+ if len(c.Data) == 0 {
+ return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+ }
+ if account == "" {
+ propertiesPath := filepath.Join(configPath, "properties")
+ f, err := os.Open(propertiesPath)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+ }
+ defer f.Close()
+ ini, err := internal.ParseINI(f)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+ }
+ core, ok := ini["core"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+ }
+ active, ok := core["account"]
+ if !ok {
+ return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+ }
+ account = active
+ }
+
+ for _, d := range c.Data {
+ if account == "" || d.Key.Account == account {
+ if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+ return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+ }
+ var expiry time.Time
+ if d.Credential.TokenExpiry != nil {
+ expiry = *d.Credential.TokenExpiry
+ }
+ return &SDKConfig{
+ conf: oauth2.Config{
+ ClientID: d.Credential.ClientID,
+ ClientSecret: d.Credential.ClientSecret,
+ Scopes: strings.Split(d.Key.Scope, " "),
+ Endpoint: Endpoint,
+ RedirectURL: "oob",
+ },
+ initialToken: &oauth2.Token{
+ AccessToken: d.Credential.AccessToken,
+ RefreshToken: d.Credential.RefreshToken,
+ Expiry: expiry,
+ },
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &oauth2.Transport{
+ Source: c.TokenSource(ctx),
+ },
+ }
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+ return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+ }
+ homeDir := guessUnixHomeDir()
+ if homeDir == "" {
+ return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+ }
+ return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+ usr, err := user.Current()
+ if err == nil {
+ return usr.HomeDir
+ }
+ return os.Getenv("HOME")
+}
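
Tying SDKConfig together, reusing credentials created by `gcloud auth` might look like the following sketch; the request URL is a placeholder, and the empty account string selects the active gcloud account, per NewSDKConfig above.

~~~~go
package main

import (
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// "" selects whichever account is active in gcloud's properties file.
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(oauth2.NoContext)
	resp, err := client.Get("https://www.googleapis.com/oauth2/v1/userinfo") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status, "scopes:", conf.Scopes())
}
~~~~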
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000..37571a1
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,69 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "bufio"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("private key is invalid")
+ }
+ return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+ result := map[string]map[string]string{
+ "": map[string]string{}, // root section
+ }
+ scanner := bufio.NewScanner(ini)
+ currentSection := ""
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, ";") {
+ // comment.
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
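
ParseINI is what reads gcloud's properties file above: plain `[section]` headers, `key = value` pairs, `;` comments, and a root section keyed by the empty string. Since the package is internal it cannot be imported from outside; the sketch below is written as it would appear in the package's own tests.

~~~~go
package internal // sketch: only this package's own tests could run it

import (
	"strings"
	"testing"
)

func TestParseINISketch(t *testing.T) {
	ini, err := ParseINI(strings.NewReader("[core]\naccount = me@example.com\n"))
	if err != nil {
		t.Fatal(err)
	}
	if got := ini["core"]["account"]; got != "me@example.com" {
		t.Fatalf("got %q", got)
	}
}
~~~~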
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 0000000..396b3fa
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,160 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides encoding and decoding utilities for
+// signed JWS messages.
+package jws
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+ Iss string `json:"iss"` // email address of the client_id of the application making the access token request
+ Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+ Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
+ Exp int64 `json:"exp"` // the expiration time of the assertion
+ Iat int64 `json:"iat"` // the time the assertion was issued.
+ Typ string `json:"typ,omitempty"` // token type (Optional).
+
+ // Email for which the application is requesting delegated access (Optional).
+ Sub string `json:"sub,omitempty"`
+
+ // The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+ Prn string `json:"prn,omitempty"`
+
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This map is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"`
+
+ exp time.Time
+ iat time.Time
+}
+
+func (c *ClaimSet) encode() (string, error) {
+ if c.exp.IsZero() || c.iat.IsZero() {
+ // Reverting time back for machines whose time is not perfectly in sync.
+ // If client machine's time is in the future according
+ // to Google servers, an access token will not be issued.
+ now := time.Now().Add(-10 * time.Second)
+ c.iat = now
+ c.exp = now.Add(time.Hour)
+ }
+
+ c.Exp = c.exp.Unix()
+ c.Iat = c.iat.Unix()
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.PrivateClaims) == 0 {
+ return base64Encode(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.PrivateClaims)
+ if err != nil {
+ return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+ // The algorithm used for signature.
+ Algorithm string `json:"alg"`
+
+ // Represents the token type.
+ Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+ // decode returned id token to get expiry
+	// A JWS payload has three dot-separated segments; the claim set is the second.
+ if len(s) < 2 {
+ // TODO(jbd): Provide more context about the error.
+ return nil, errors.New("jws: invalid token received")
+ }
+ decoded, err := base64Decode(s[1])
+ if err != nil {
+ return nil, err
+ }
+ c := &ClaimSet{}
+ err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+ return c, err
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {
+ head, err := header.encode()
+ if err != nil {
+ return "", err
+ }
+ cs, err := c.encode()
+ if err != nil {
+ return "", err
+ }
+ ss := fmt.Sprintf("%s.%s", head, cs)
+ h := sha256.New()
+ h.Write([]byte(ss))
+ b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil))
+ if err != nil {
+ return "", err
+ }
+ sig := base64Encode(b)
+ return fmt.Sprintf("%s.%s", ss, sig), nil
+}
+
+// base64Encode returns a Base64url encoded version of the input bytes with any
+// trailing "=" stripped.
+func base64Encode(b []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// base64Decode decodes the Base64url encoded string
+func base64Decode(s string) ([]byte, error) {
+ // add back missing padding
+ switch len(s) % 4 {
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
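
Signing a claim set with the helpers above could look like this sketch; the issuer and audience are placeholders, and a freshly generated key stands in for a real service-account key.

~~~~go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	// A throwaway key for illustration; real callers load a service
	// account key instead (e.g. with internal.ParseKey).
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	claims := &jws.ClaimSet{
		Iss:   "service-account@example.com", // placeholder issuer
		Scope: "email",                       // illustrative scope
		Aud:   "https://accounts.google.com/o/oauth2/token",
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}
	// header.claims.signature, each segment base64url-encoded.
	fmt.Println(token)
}
~~~~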
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 0000000..205d23e
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+ // Email is the OAuth client identifier used when communicating with
+ // the configured OAuth provider.
+ Email string
+
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. The provided
+ // private key is used to sign JWT payloads.
+ // PEM containers with a passphrase are not supported.
+ // Use the following command to convert a PKCS 12 file into a PEM.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ PrivateKey []byte
+
+ // Subject is the optional user to impersonate.
+ Subject string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+
+ // TokenURL is the endpoint required to complete the 2-legged JWT flow.
+ TokenURL string
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+ ctx context.Context
+ conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+ pk, err := internal.ParseKey(js.conf.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ hc := oauth2.NewClient(js.ctx, nil)
+ claimSet := &jws.ClaimSet{
+ Iss: js.conf.Email,
+ Scope: strings.Join(js.conf.Scopes, " "),
+ Aud: js.conf.TokenURL,
+ }
+ if subject := js.conf.Subject; subject != "" {
+ claimSet.Sub = subject
+ // prn is the old name of sub. Keep setting it
+ // to be compatible with legacy OAuth 2.0 providers.
+ claimSet.Prn = subject
+ }
+ payload, err := jws.Encode(defaultHeader, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ resp, err := hc.PostForm(js.conf.TokenURL, v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ ExpiresIn int64 `json:"expires_in"` // relative seconds from now
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ token := &oauth2.Token{
+ AccessToken: tokenRes.AccessToken,
+ TokenType: tokenRes.TokenType,
+ }
+ raw := make(map[string]interface{})
+ json.Unmarshal(body, &raw) // no error checks for optional fields
+ token = token.WithExtra(raw)
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ if v := tokenRes.IDToken; v != "" {
+ // decode returned id token to get expiry
+ claimSet, err := jws.Decode(v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+ }
+ token.Expiry = time.Unix(claimSet.Exp, 0)
+ }
+ return token, nil
+}
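
Editorial note: a minimal usage sketch for the two-legged JWT flow vendored above, assuming this file is the golang.org/x/oauth2/jwt package; the email, key path, scope, and token URL are placeholders, not values taken from this repository.

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

func main() {
	// PEM-encoded key, e.g. produced by:
	//   openssl pkcs12 -in key.p12 -out key.pem -nodes
	key, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:      "service-account@example.com", // placeholder
		PrivateKey: key,
		Scopes:     []string{"https://www.googleapis.com/auth/bigquery"},
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}
	// Client wraps jwtSource in ReuseTokenSource, so a new assertion
	// is signed and exchanged only when the cached token expires.
	client := conf.Client(oauth2.NoContext)
	resp, err := client.Get("https://www.googleapis.com/bigquery/v2/projects")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}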
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go
new file mode 100644
index 0000000..d93fded
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package linkedin provides constants for using OAuth2 to access LinkedIn.
+package linkedin
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is LinkedIn's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.linkedin.com/uas/oauth2/authorization",
+ TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken",
+}
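
Editorial note: the endpoint-only packages (linkedin here, and odnoklassniki, paypal, and vk below) are consumed by plugging their Endpoint into an oauth2.Config. A sketch with placeholder credentials and an assumed scope name:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/linkedin"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"r_basicprofile"}, // assumed scope name
		Endpoint:     linkedin.Endpoint,
	}
	// Print the consent-page URL the user should visit.
	fmt.Println(conf.AuthCodeURL("state-token"))
}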
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 0000000..87d3f76
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,517 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+var NoContext = context.TODO()
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL to redirect users to after they
+ // complete the authorization step of the OAuth flow.
+ RedirectURL string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+}
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Config.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online is the default if neither is specified. If your
+ // application needs to refresh access tokens when the user
+ // is not present at the browser, then use offline. This will
+ // result in your application obtaining a refresh token the
+ // first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = setParam{"access_type", "online"}
+ AccessTypeOffline AuthCodeOption = setParam{"access_type", "offline"}
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = setParam{"approval_prompt", "force"}
+)
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+// AuthCodeURL returns a URL to the OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches the
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+ var buf bytes.Buffer
+ buf.WriteString(c.Endpoint.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {c.ClientID},
+ "redirect_uri": condVal(c.RedirectURL),
+ "scope": condVal(strings.Join(c.Scopes, " ")),
+ "state": condVal(state),
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ if strings.Contains(c.Endpoint.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"password"},
+ "username": {username},
+ "password": {password},
+ "scope": condVal(strings.Join(c.Scopes, " ")),
+ })
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ "redirect_uri": condVal(c.RedirectURL),
+ "scope": condVal(strings.Join(c.Scopes, " ")),
+ })
+}
+
+// contextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error. If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type contextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []contextClientFunc
+
+func registerContextClientFunc(fn contextClientFunc) {
+ contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func contextClient(ctx context.Context) (*http.Client, error) {
+ for _, fn := range contextClientFuncs {
+ c, err := fn(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ return c, nil
+ }
+ }
+ if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+ return hc, nil
+ }
+ return http.DefaultClient, nil
+}
+
+func contextTransport(ctx context.Context) http.RoundTripper {
+ hc, err := contextClient(ctx)
+ if err != nil {
+ // This is a rare error case (somebody using nil on App Engine),
+ // so I'd rather not everybody do an error check on this Client
+ // method. They can get the error that they're doing it wrong
+ // later, at client.Get/PostForm time.
+ return errorTransport{err}
+ }
+ return hc.Transport
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+ return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+ tkr := &tokenRefresher{
+ ctx: ctx,
+ conf: c,
+ }
+ if t != nil {
+ tkr.refreshToken = t.RefreshToken
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: tkr,
+ }
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+ ctx context.Context // used to get HTTP requests
+ conf *Config
+ refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+ if tf.refreshToken == "" {
+ return nil, errors.New("oauth2: token expired and refresh token is not set")
+ }
+
+ tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tf.refreshToken},
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if tf.refreshToken != tk.RefreshToken {
+ tf.refreshToken = tk.RefreshToken
+ }
+ return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+ new TokenSource // called when t is expired.
+
+ mu sync.Mutex // guards t
+ t *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token using the wrapped TokenSource and
+// return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ hc, err := contextClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ v.Set("client_id", c.ClientID)
+ bustedAuth := !providerAuthHeaderWorks(c.Endpoint.TokenURL)
+ if bustedAuth && c.ClientSecret != "" {
+ v.Set("client_secret", c.ClientSecret)
+ }
+ req, err := http.NewRequest("POST", c.Endpoint.TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if !bustedAuth {
+ req.SetBasicAuth(c.ClientID, c.ClientSecret)
+ }
+ r, err := hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if code := r.StatusCode; code < 200 || code > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+ }
+
+ var token *Token
+ content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: vals.Get("access_token"),
+ TokenType: vals.Get("token_type"),
+ RefreshToken: vals.Get("refresh_token"),
+ raw: vals,
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ // TODO(jbd): Facebook's OAuth2 implementation is broken and
+ // returns expires_in field in expires. Remove the fallback to expires,
+ // when Facebook fixes their implementation.
+ e = vals.Get("expires")
+ }
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: tj.AccessToken,
+ TokenType: tj.TokenType,
+ RefreshToken: tj.RefreshToken,
+ Expiry: tj.expiry(),
+ raw: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.raw) // no error checks for optional fields
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, nil
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+ Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ if v := e.Expires; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ var n json.Number
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+ i, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ *e = expirationTime(i)
+ return nil
+}
+
+func condVal(v string) []string {
+ if v == "" {
+ return nil
+ }
+ return []string{v}
+}
+
+var brokenAuthHeaderProviders = []string{
+ "https://accounts.google.com/",
+ "https://www.googleapis.com/",
+ "https://github.com/",
+ "https://api.instagram.com/",
+ "https://www.douban.com/",
+ "https://api.dropbox.com/",
+ "https://api.soundcloud.com/",
+ "https://www.linkedin.com/",
+ "https://api.twitch.tv/",
+ "https://oauth.vk.com/",
+ "https://api.odnoklassniki.ru/",
+ "https://connect.stripe.com/",
+ "https://api.pushbullet.com/",
+ "https://oauth.sandbox.trainingpeaks.com/",
+ "https://oauth.trainingpeaks.com/",
+ "https://www.strava.com/oauth/",
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it in either the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+ for _, s := range brokenAuthHeaderProviders {
+ if strings.HasPrefix(tokenURL, s) {
+ // Some sites fail to implement the OAuth2 spec fully.
+ return false
+ }
+ }
+
+ // Assume the provider implements the spec properly
+ // otherwise. We can add more exceptions as they're
+ // discovered. We will _not_ be adding configurable hooks
+ // to this package to let users select server bugs.
+ return true
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient contextKey
+
+// contextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a contextKey, being unexported.
+type contextKey struct{}
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+ if src == nil {
+ c, err := contextClient(ctx)
+ if err != nil {
+ return &http.Client{Transport: errorTransport{err}}
+ }
+ return c
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: contextTransport(ctx),
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
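
Editorial note: a sketch of the full three-legged flow defined above, against a placeholder provider; it also shows overriding the HTTP client used for the token exchange via the HTTPClient context key.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		RedirectURL:  "https://example.com/callback",
		Endpoint: oauth2.Endpoint{ // placeholder provider URLs
			AuthURL:  "https://provider.example.com/oauth/authorize",
			TokenURL: "https://provider.example.com/oauth/token",
		},
	}

	// 1. Send the user to the consent page. The state value must be
	// validated against the callback's state query parameter.
	fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

	// 2. Exchange the code from the callback for a token. The client
	// used for the POST is taken from the context if one is present.
	ctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, http.DefaultClient)
	tok, err := conf.Exchange(ctx, "code-from-callback") // placeholder code
	if err != nil {
		log.Fatal(err)
	}

	// 3. Requests through this client carry the token and refresh it
	// automatically via tokenRefresher/reuseTokenSource.
	client := conf.Client(ctx, tok)
	_ = client
}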
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
new file mode 100644
index 0000000..f0b66f9
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.
+package odnoklassniki
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.odnoklassniki.ru/oauth/authorize",
+ TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
new file mode 100644
index 0000000..a99366b
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package paypal provides constants for using OAuth2 to access PayPal.
+package paypal
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+ TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
+}
+
+// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment.
+var SandboxEndpoint = oauth2.Endpoint{
+ AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
+ TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000..9852ceb
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
@@ -0,0 +1,104 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string `json:"access_token"`
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string `json:"token_type,omitempty"`
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"`
+
+ // raw optionally contains extra metadata from the server
+ // when updating a token.
+ raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if vals, ok := t.raw.(url.Values); ok {
+ // TODO(jbd): Cast numeric values to int64 or float64.
+ return vals.Get(key)
+ }
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+ return nil
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
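
Editorial note: a small sketch of using a Token directly, without Transport, via the helpers above; the token value and URL are placeholders.

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{AccessToken: "ACCESS_TOKEN"} // placeholder
	req, err := http.NewRequest("GET", "https://api.example.com/resource", nil)
	if err != nil {
		panic(err)
	}
	if tok.Valid() { // non-nil, has an AccessToken, not expired
		tok.SetAuthHeader(req) // Authorization: Bearer ACCESS_TOKEN
	}
	fmt.Println(req.Header.Get("Authorization"))
}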
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000..10339a0
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
@@ -0,0 +1,138 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or the token is expired, it
+// tries to refresh or fetch a new one.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
+
+type errorTransport struct{ err error }
+
+func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+ return nil, t.err
+}
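
Editorial note: a sketch of wiring Transport to a hand-rolled TokenSource. This vendored snapshot appears to predate oauth2.StaticTokenSource, so the example defines a trivial source of its own; the token value is a placeholder.

package main

import (
	"net/http"

	"golang.org/x/oauth2"
)

// staticSource always returns the same token; illustration only.
type staticSource struct{ t *oauth2.Token }

func (s staticSource) Token() (*oauth2.Token, error) { return s.t, nil }

func main() {
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: staticSource{t: &oauth2.Token{AccessToken: "ACCESS_TOKEN"}},
			// Base is nil, so http.DefaultTransport is used.
		},
	}
	_ = client
}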
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go
new file mode 100644
index 0000000..00e9293
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vk provides constants for using OAuth2 to access VK.com.
+package vk
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is VK's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://oauth.vk.com/authorize",
+ TokenURL: "https://oauth.vk.com/access_token",
+}
diff --git a/Godeps/_workspace/src/google.golang.org/api/LICENSE b/Godeps/_workspace/src/google.golang.org/api/LICENSE
new file mode 100644
index 0000000..263aa7a
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/google.golang.org/api/bigquery/v2/bigquery-api.json b/Godeps/_workspace/src/google.golang.org/api/bigquery/v2/bigquery-api.json
new file mode 100644
index 0000000..339144b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/bigquery/v2/bigquery-api.json
@@ -0,0 +1,2282 @@
+{
+ "kind": "discovery#restDescription",
+ "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/NtZ5jtkXTc_PcGwBbw5OcARIHbE\"",
+ "discoveryVersion": "v1",
+ "id": "bigquery:v2",
+ "name": "bigquery",
+ "version": "v2",
+ "revision": "20150710",
+ "title": "BigQuery API",
+ "description": "A data platform for customers to create, manage, share and query data.",
+ "ownerDomain": "google.com",
+ "ownerName": "Google",
+ "icons": {
+ "x16": "https://www.google.com/images/icons/product/search-16.gif",
+ "x32": "https://www.google.com/images/icons/product/search-32.gif"
+ },
+ "documentationLink": "https://cloud.google.com/bigquery/",
+ "protocol": "rest",
+ "baseUrl": "https://www.googleapis.com/bigquery/v2/",
+ "basePath": "/bigquery/v2/",
+ "rootUrl": "https://www.googleapis.com/",
+ "servicePath": "bigquery/v2/",
+ "batchPath": "batch",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "csv",
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of text/csv",
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ },
+ "prettyPrint": {
+ "type": "boolean",
+ "description": "Returns response with indentations and line breaks.",
+ "default": "true",
+ "location": "query"
+ },
+ "quotaUser": {
+ "type": "string",
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
+ "location": "query"
+ },
+ "userIp": {
+ "type": "string",
+ "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
+ "location": "query"
+ }
+ },
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://www.googleapis.com/auth/bigquery": {
+ "description": "View and manage your data in Google BigQuery"
+ },
+ "https://www.googleapis.com/auth/bigquery.insertdata": {
+ "description": "Insert data into Google BigQuery"
+ },
+ "https://www.googleapis.com/auth/cloud-platform": {
+ "description": "View and manage your data across Google Cloud Platform services"
+ },
+ "https://www.googleapis.com/auth/devstorage.full_control": {
+ "description": "Manage your data and permissions in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_only": {
+ "description": "View your data in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_write": {
+ "description": "Manage your data in Google Cloud Storage"
+ }
+ }
+ }
+ },
+ "schemas": {
+ "CsvOptions": {
+ "id": "CsvOptions",
+ "type": "object",
+ "properties": {
+ "allowJaggedRows": {
+ "type": "boolean",
+ "description": "[Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false."
+ },
+ "allowQuotedNewlines": {
+ "type": "boolean",
+ "description": "[Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."
+ },
+ "encoding": {
+ "type": "string",
+ "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties."
+ },
+ "fieldDelimiter": {
+ "type": "string",
+ "description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')."
+ },
+ "quote": {
+ "type": "string",
+ "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.",
+ "default": "\"",
+ "pattern": ".?"
+ },
+ "skipLeadingRows": {
+ "type": "integer",
+ "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.",
+ "format": "int32"
+ }
+ }
+ },
+ "Dataset": {
+ "id": "Dataset",
+ "type": "object",
+ "properties": {
+ "access": {
+ "type": "array",
+ "description": "[Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;",
+ "items": {
+ "type": "object",
+ "properties": {
+ "domain": {
+ "type": "string",
+ "description": "[Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: \"example.com\"."
+ },
+ "groupByEmail": {
+ "type": "string",
+ "description": "[Pick one] An email address of a Google Group to grant access to."
+ },
+ "role": {
+ "type": "string",
+ "description": "[Required] Describes the rights granted to the user specified by the other member of the access object. The following string values are supported: READER, WRITER, OWNER."
+ },
+ "specialGroup": {
+ "type": "string",
+ "description": "[Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. allAuthenticatedUsers: All authenticated BigQuery users."
+ },
+ "userByEmail": {
+ "type": "string",
+ "description": "[Pick one] An email address of a user to grant access to. For example: fred@example.com."
+ },
+ "view": {
+ "$ref": "TableReference",
+ "description": "[Pick one] A view from a different dataset to grant access to. Queries executed against that view will have read access to tables in this dataset. The role field is not required when this field is set. If that view is updated by any user, access to the view needs to be granted again via an update operation."
+ }
+ }
+ }
+ },
+ "creationTime": {
+ "type": "string",
+ "description": "[Output-only] The time when this dataset was created, in milliseconds since the epoch.",
+ "format": "int64"
+ },
+ "datasetReference": {
+ "$ref": "DatasetReference",
+ "description": "[Required] A reference that identifies the dataset."
+ },
+ "defaultTableExpirationMs": {
+ "type": "string",
+ "description": "[Experimental] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.",
+ "format": "int64"
+ },
+ "description": {
+ "type": "string",
+ "description": "[Optional] A user-friendly description of the dataset."
+ },
+ "etag": {
+ "type": "string",
+ "description": "[Output-only] A hash of the resource."
+ },
+ "friendlyName": {
+ "type": "string",
+ "description": "[Optional] A descriptive name for the dataset."
+ },
+ "id": {
+ "type": "string",
+ "description": "[Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field."
+ },
+ "kind": {
+ "type": "string",
+ "description": "[Output-only] The resource type.",
+ "default": "bigquery#dataset"
+ },
+ "lastModifiedTime": {
+ "type": "string",
+ "description": "[Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.",
+ "format": "int64"
+ },
+ "location": {
+ "type": "string",
+ "description": "[Experimental] The location where the data resides. If not present, the data will be stored in the US."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "[Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource."
+ }
+ }
+ },
+ "DatasetList": {
+ "id": "DatasetList",
+ "type": "object",
+ "properties": {
+ "datasets": {
+ "type": "array",
+ "description": "An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. This property is omitted when there are no datasets in the project.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "datasetReference": {
+ "$ref": "DatasetReference",
+ "description": "The dataset reference. Use this property to access specific parts of the dataset's ID, such as project ID or dataset ID."
+ },
+ "friendlyName": {
+ "type": "string",
+ "description": "A descriptive name for the dataset, if one exists."
+ },
+ "id": {
+ "type": "string",
+ "description": "The fully-qualified, unique, opaque ID of the dataset."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type. This property always returns the value \"bigquery#dataset\".",
+ "default": "bigquery#dataset"
+ }
+ }
+ }
+ },
+ "etag": {
+ "type": "string",
+ "description": "A hash value of the results page. You can use this property to determine if the page has changed since the last request."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The list type. This property always returns the value \"bigquery#datasetList\".",
+ "default": "bigquery#datasetList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token that can be used to request the next results page. This property is omitted on the final results page."
+ }
+ }
+ },
+ "DatasetReference": {
+ "id": "DatasetReference",
+ "type": "object",
+ "properties": {
+ "datasetId": {
+ "type": "string",
+ "description": "[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
+ "annotations": {
+ "required": [
+ "bigquery.datasets.update"
+ ]
+ }
+ },
+ "projectId": {
+ "type": "string",
+ "description": "[Optional] The ID of the project containing this dataset.",
+ "annotations": {
+ "required": [
+ "bigquery.datasets.update"
+ ]
+ }
+ }
+ }
+ },
+ "ErrorProto": {
+ "id": "ErrorProto",
+ "type": "object",
+ "properties": {
+ "debugInfo": {
+ "type": "string",
+ "description": "Debugging information. This property is internal to Google and should not be used."
+ },
+ "location": {
+ "type": "string",
+ "description": "Specifies where the error occurred, if present."
+ },
+ "message": {
+ "type": "string",
+ "description": "A human-readable description of the error."
+ },
+ "reason": {
+ "type": "string",
+ "description": "A short error code that summarizes the error."
+ }
+ }
+ },
+ "ExternalDataConfiguration": {
+ "id": "ExternalDataConfiguration",
+ "type": "object",
+ "properties": {
+ "compression": {
+ "type": "string",
+ "description": "[Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE."
+ },
+ "csvOptions": {
+ "$ref": "CsvOptions",
+ "description": "Additional properties to set if sourceFormat is set to CSV."
+ },
+ "ignoreUnknownValues": {
+ "type": "boolean",
+ "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns"
+ },
+ "maxBadRecords": {
+ "type": "integer",
+ "description": "[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.",
+ "format": "int32"
+ },
+ "schema": {
+ "$ref": "TableSchema",
+ "description": "[Required] The schema for the data."
+ },
+ "sourceFormat": {
+ "type": "string",
+ "description": "[Optional] The data format. External data sources must be in CSV format. The default value is CSV."
+ },
+ "sourceUris": {
+ "type": "array",
+ "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. CSV limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "GetQueryResultsResponse": {
+ "id": "GetQueryResultsResponse",
+ "type": "object",
+ "properties": {
+ "cacheHit": {
+ "type": "boolean",
+ "description": "Whether the query result was fetched from the query cache."
+ },
+ "etag": {
+ "type": "string",
+ "description": "A hash of this response."
+ },
+ "jobComplete": {
+ "type": "boolean",
+ "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available."
+ },
+ "jobReference": {
+ "$ref": "JobReference",
+ "description": "Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type of the response.",
+ "default": "bigquery#getQueryResultsResponse"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A token used for paging results."
+ },
+ "rows": {
+ "type": "array",
+ "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.",
+ "items": {
+ "$ref": "TableRow"
+ }
+ },
+ "schema": {
+ "$ref": "TableSchema",
+ "description": "The schema of the results. Present only when the query completes successfully."
+ },
+ "totalBytesProcessed": {
+ "type": "string",
+ "description": "The total number of bytes processed for this query.",
+ "format": "int64"
+ },
+ "totalRows": {
+ "type": "string",
+ "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. Present only when the query completes successfully.",
+ "format": "uint64"
+ }
+ }
+ },
+ "Job": {
+ "id": "Job",
+ "type": "object",
+ "properties": {
+ "configuration": {
+ "$ref": "JobConfiguration",
+ "description": "[Required] Describes the job configuration."
+ },
+ "etag": {
+ "type": "string",
+ "description": "[Output-only] A hash of this resource."
+ },
+ "id": {
+ "type": "string",
+ "description": "[Output-only] Opaque ID field of the job"
+ },
+ "jobReference": {
+ "$ref": "JobReference",
+ "description": "[Optional] Reference describing the unique-per-user name of the job."
+ },
+ "kind": {
+ "type": "string",
+ "description": "[Output-only] The type of the resource.",
+ "default": "bigquery#job"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "[Output-only] A URL that can be used to access this resource again."
+ },
+ "statistics": {
+ "$ref": "JobStatistics",
+ "description": "[Output-only] Information about the job, including starting time and ending time of the job."
+ },
+ "status": {
+ "$ref": "JobStatus",
+ "description": "[Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete."
+ },
+ "user_email": {
+ "type": "string",
+ "description": "[Output-only] Email address of the user who ran the job."
+ }
+ }
+ },
+ "JobCancelResponse": {
+ "id": "JobCancelResponse",
+ "type": "object",
+ "properties": {
+ "job": {
+ "$ref": "Job",
+ "description": "The final state of the job."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type of the response.",
+ "default": "bigquery#jobCancelResponse"
+ }
+ }
+ },
+ "JobConfiguration": {
+ "id": "JobConfiguration",
+ "type": "object",
+ "properties": {
+ "copy": {
+ "$ref": "JobConfigurationTableCopy",
+ "description": "[Pick one] Copies a table."
+ },
+ "dryRun": {
+ "type": "boolean",
+ "description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run. Behavior of non-query jobs is undefined."
+ },
+ "extract": {
+ "$ref": "JobConfigurationExtract",
+ "description": "[Pick one] Configures an extract job."
+ },
+ "link": {
+ "$ref": "JobConfigurationLink",
+ "description": "[Pick one] Configures a link job."
+ },
+ "load": {
+ "$ref": "JobConfigurationLoad",
+ "description": "[Pick one] Configures a load job."
+ },
+ "query": {
+ "$ref": "JobConfigurationQuery",
+ "description": "[Pick one] Configures a query job."
+ }
+ }
+ },
+ "JobConfigurationExtract": {
+ "id": "JobConfigurationExtract",
+ "type": "object",
+ "properties": {
+ "compression": {
+ "type": "string",
+ "description": "[Optional] The compression type to use for exported files. Possible values include GZIP and NONE. The default value is NONE."
+ },
+ "destinationFormat": {
+ "type": "string",
+ "description": "[Optional] The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. Tables with nested or repeated fields cannot be exported as CSV."
+ },
+ "destinationUri": {
+ "type": "string",
+ "description": "[Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as necessary. The fully-qualified Google Cloud Storage URI where the extracted table should be written."
+ },
+ "destinationUris": {
+ "type": "array",
+ "description": "[Pick one] A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "fieldDelimiter": {
+ "type": "string",
+ "description": "[Optional] Delimiter to use between fields in the exported data. Default is ','"
+ },
+ "printHeader": {
+ "type": "boolean",
+ "description": "[Optional] Whether to print out a header row in the results. Default is true.",
+ "default": "true"
+ },
+ "sourceTable": {
+ "$ref": "TableReference",
+ "description": "[Required] A reference to the table being exported."
+ }
+ }
+ },
+ "JobConfigurationLink": {
+ "id": "JobConfigurationLink",
+ "type": "object",
+ "properties": {
+ "createDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
+ },
+ "destinationTable": {
+ "$ref": "TableReference",
+ "description": "[Required] The destination table of the link job."
+ },
+ "sourceUri": {
+ "type": "array",
+ "description": "[Required] URI of source table to link.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "writeDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
+ }
+ }
+ },
+ "JobConfigurationLoad": {
+ "id": "JobConfigurationLoad",
+ "type": "object",
+ "properties": {
+ "allowJaggedRows": {
+ "type": "boolean",
+ "description": "[Optional] Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats."
+ },
+ "allowQuotedNewlines": {
+ "type": "boolean",
+ "description": "Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."
+ },
+ "createDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
+ },
+ "destinationTable": {
+ "$ref": "TableReference",
+ "description": "[Required] The destination table to load the data into."
+ },
+ "encoding": {
+ "type": "string",
+ "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties."
+ },
+ "fieldDelimiter": {
+ "type": "string",
+ "description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')."
+ },
+ "ignoreUnknownValues": {
+ "type": "boolean",
+ "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names"
+ },
+ "maxBadRecords": {
+ "type": "integer",
+ "description": "[Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.",
+ "format": "int32"
+ },
+ "projectionFields": {
+ "type": "array",
+ "description": "[Experimental] If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "quote": {
+ "type": "string",
+ "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.",
+ "default": "\"",
+ "pattern": ".?"
+ },
+ "schema": {
+ "$ref": "TableSchema",
+ "description": "[Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data."
+ },
+ "schemaInline": {
+ "type": "string",
+ "description": "[Deprecated] The inline schema. For CSV schemas, specify as \"Field1:Type1[,Field2:Type2]*\". For example, \"foo:STRING, bar:INTEGER, baz:FLOAT\"."
+ },
+ "schemaInlineFormat": {
+ "type": "string",
+ "description": "[Deprecated] The format of the schemaInline property."
+ },
+ "skipLeadingRows": {
+ "type": "integer",
+ "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.",
+ "format": "int32"
+ },
+ "sourceFormat": {
+ "type": "string",
+ "description": "[Optional] The format of the data files. For CSV files, specify \"CSV\". For datastore backups, specify \"DATASTORE_BACKUP\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". The default value is CSV."
+ },
+ "sourceUris": {
+ "type": "array",
+ "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "writeDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
+ }
+ }
+ },
+ "JobConfigurationQuery": {
+ "id": "JobConfigurationQuery",
+ "type": "object",
+ "properties": {
+ "allowLargeResults": {
+ "type": "boolean",
+ "description": "If true, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set."
+ },
+ "createDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
+ },
+ "defaultDataset": {
+ "$ref": "DatasetReference",
+ "description": "[Optional] Specifies the default dataset to use for unqualified table names in the query."
+ },
+ "destinationTable": {
+ "$ref": "TableReference",
+ "description": "[Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results."
+ },
+ "flattenResults": {
+ "type": "boolean",
+ "description": "[Optional] Flattens all nested and repeated fields in the query results. The default value is true. allowLargeResults must be true if this is set to false.",
+ "default": "true"
+ },
+ "preserveNulls": {
+ "type": "boolean",
+ "description": "[Deprecated] This property is deprecated."
+ },
+ "priority": {
+ "type": "string",
+ "description": "[Optional] Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE."
+ },
+ "query": {
+ "type": "string",
+ "description": "[Required] BigQuery SQL query to execute."
+ },
+ "tableDefinitions": {
+ "type": "object",
+ "description": "[Experimental] If querying an external data source outside of BigQuery, describes the data format, location and other properties of the data source. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.",
+ "additionalProperties": {
+ "$ref": "ExternalDataConfiguration"
+ }
+ },
+ "useQueryCache": {
+ "type": "boolean",
+ "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. The default value is true.",
+ "default": "true"
+ },
+ "userDefinedFunctionResources": {
+ "type": "array",
+ "description": "[Experimental] Describes user-defined function resources used in the query.",
+ "items": {
+ "$ref": "UserDefinedFunctionResource"
+ }
+ },
+ "writeDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
+ }
+ }
+ },
+ "JobConfigurationTableCopy": {
+ "id": "JobConfigurationTableCopy",
+ "type": "object",
+ "properties": {
+ "createDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
+ },
+ "destinationTable": {
+ "$ref": "TableReference",
+ "description": "[Required] The destination table"
+ },
+ "sourceTable": {
+ "$ref": "TableReference",
+ "description": "[Pick one] Source table to copy."
+ },
+ "sourceTables": {
+ "type": "array",
+ "description": "[Pick one] Source tables to copy.",
+ "items": {
+ "$ref": "TableReference"
+ }
+ },
+ "writeDisposition": {
+ "type": "string",
+ "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
+ }
+ }
+ },
+ "JobList": {
+ "id": "JobList",
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "A hash of this page of results."
+ },
+ "jobs": {
+ "type": "array",
+ "description": "List of jobs that were requested.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "configuration": {
+ "$ref": "JobConfiguration",
+ "description": "[Full-projection-only] Specifies the job configuration."
+ },
+ "errorResult": {
+ "$ref": "ErrorProto",
+ "description": "A result object that will be present only if the job has failed."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique opaque ID of the job."
+ },
+ "jobReference": {
+ "$ref": "JobReference",
+ "description": "Job reference uniquely identifying the job."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type.",
+ "default": "bigquery#job"
+ },
+ "state": {
+ "type": "string",
+ "description": "Running state of the job. When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed."
+ },
+ "statistics": {
+ "$ref": "JobStatistics",
+ "description": "[Output-only] Information about the job, including starting time and ending time of the job."
+ },
+ "status": {
+ "$ref": "JobStatus",
+ "description": "[Full-projection-only] Describes the state of the job."
+ },
+ "user_email": {
+ "type": "string",
+ "description": "[Full-projection-only] Email address of the user who ran the job."
+ }
+ }
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type of the response.",
+ "default": "bigquery#jobList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token to request the next page of results."
+ }
+ }
+ },
+ "JobReference": {
+ "id": "JobReference",
+ "type": "object",
+ "properties": {
+ "jobId": {
+ "type": "string",
+ "description": "[Required] The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.",
+ "annotations": {
+ "required": [
+ "bigquery.jobs.getQueryResults"
+ ]
+ }
+ },
+ "projectId": {
+ "type": "string",
+ "description": "[Required] The ID of the project containing this job.",
+ "annotations": {
+ "required": [
+ "bigquery.jobs.getQueryResults"
+ ]
+ }
+ }
+ }
+ },
+ "JobStatistics": {
+ "id": "JobStatistics",
+ "type": "object",
+ "properties": {
+ "creationTime": {
+ "type": "string",
+ "description": "[Output-only] Creation time of this job, in milliseconds since the epoch. This field will be present on all jobs.",
+ "format": "int64"
+ },
+ "endTime": {
+ "type": "string",
+ "description": "[Output-only] End time of this job, in milliseconds since the epoch. This field will be present whenever a job is in the DONE state.",
+ "format": "int64"
+ },
+ "extract": {
+ "$ref": "JobStatistics4",
+ "description": "[Output-only] Statistics for an extract job."
+ },
+ "load": {
+ "$ref": "JobStatistics3",
+ "description": "[Output-only] Statistics for a load job."
+ },
+ "query": {
+ "$ref": "JobStatistics2",
+ "description": "[Output-only] Statistics for a query job."
+ },
+ "startTime": {
+ "type": "string",
+ "description": "[Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE.",
+ "format": "int64"
+ },
+ "totalBytesProcessed": {
+ "type": "string",
+ "description": "[Output-only] [Deprecated] Use the bytes processed in the query statistics instead.",
+ "format": "int64"
+ }
+ }
+ },
+ "JobStatistics2": {
+ "id": "JobStatistics2",
+ "type": "object",
+ "properties": {
+ "cacheHit": {
+ "type": "boolean",
+ "description": "[Output-only] Whether the query result was fetched from the query cache."
+ },
+ "totalBytesProcessed": {
+ "type": "string",
+ "description": "[Output-only] Total bytes processed for this job.",
+ "format": "int64"
+ }
+ }
+ },
+ "JobStatistics3": {
+ "id": "JobStatistics3",
+ "type": "object",
+ "properties": {
+ "inputFileBytes": {
+ "type": "string",
+ "description": "[Output-only] Number of bytes of source data in a joad job.",
+ "format": "int64"
+ },
+ "inputFiles": {
+ "type": "string",
+ "description": "[Output-only] Number of source files in a load job.",
+ "format": "int64"
+ },
+ "outputBytes": {
+ "type": "string",
+ "description": "[Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.",
+ "format": "int64"
+ },
+ "outputRows": {
+ "type": "string",
+ "description": "[Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.",
+ "format": "int64"
+ }
+ }
+ },
+ "JobStatistics4": {
+ "id": "JobStatistics4",
+ "type": "object",
+ "properties": {
+ "destinationUriFileCounts": {
+ "type": "array",
+ "description": "[Experimental] Number of files per destination URI or URI pattern specified in the extract configuration. These values will be in the same order as the URIs specified in the 'destinationUris' field.",
+ "items": {
+ "type": "string",
+ "format": "int64"
+ }
+ }
+ }
+ },
+ "JobStatus": {
+ "id": "JobStatus",
+ "type": "object",
+ "properties": {
+ "errorResult": {
+ "$ref": "ErrorProto",
+ "description": "[Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful."
+ },
+ "errors": {
+ "type": "array",
+ "description": "[Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.",
+ "items": {
+ "$ref": "ErrorProto"
+ }
+ },
+ "state": {
+ "type": "string",
+ "description": "[Output-only] Running state of the job."
+ }
+ }
+ },
+ "JsonObject": {
+ "id": "JsonObject",
+ "type": "object",
+ "description": "Represents a single JSON object.",
+ "additionalProperties": {
+ "$ref": "JsonValue"
+ }
+ },
+ "JsonValue": {
+ "id": "JsonValue",
+ "type": "any"
+ },
+ "ProjectList": {
+ "id": "ProjectList",
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "A hash of the page of results"
+ },
+ "kind": {
+ "type": "string",
+ "description": "The type of list.",
+ "default": "bigquery#projectList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token to request the next page of results."
+ },
+ "projects": {
+ "type": "array",
+ "description": "Projects to which you have at least READ access.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "friendlyName": {
+ "type": "string",
+ "description": "A descriptive name for this project."
+ },
+ "id": {
+ "type": "string",
+ "description": "An opaque ID of this project."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type.",
+ "default": "bigquery#project"
+ },
+ "numericId": {
+ "type": "string",
+ "description": "The numeric ID of this project.",
+ "format": "uint64"
+ },
+ "projectReference": {
+ "$ref": "ProjectReference",
+ "description": "A unique reference to this project."
+ }
+ }
+ }
+ },
+ "totalItems": {
+ "type": "integer",
+ "description": "The total number of projects in the list.",
+ "format": "int32"
+ }
+ }
+ },
+ "ProjectReference": {
+ "id": "ProjectReference",
+ "type": "object",
+ "properties": {
+ "projectId": {
+ "type": "string",
+ "description": "[Required] ID of the project. Can be either the numeric ID or the assigned ID of the project."
+ }
+ }
+ },
+ "QueryRequest": {
+ "id": "QueryRequest",
+ "type": "object",
+ "properties": {
+ "defaultDataset": {
+ "$ref": "DatasetReference",
+ "description": "[Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'."
+ },
+ "dryRun": {
+ "type": "boolean",
+ "description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type of the request.",
+ "default": "bigquery#queryRequest"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "[Optional] The maximum number of rows of data to return per page of results. Setting this flag to a small value such as 1000 and then paging through results might improve reliability when the query result set is large. In addition to this limit, responses are also limited to 10 MB. By default, there is no maximum row count, and only the byte limit applies.",
+ "format": "uint32"
+ },
+ "preserveNulls": {
+ "type": "boolean",
+ "description": "[Deprecated] This property is deprecated."
+ },
+ "query": {
+ "type": "string",
+ "description": "[Required] A query string, following the BigQuery query syntax, of the query to execute. Example: \"SELECT count(f1) FROM [myProjectId:myDatasetId.myTableId]\".",
+ "annotations": {
+ "required": [
+ "bigquery.jobs.query"
+ ]
+ }
+ },
+ "timeoutMs": {
+ "type": "integer",
+ "description": "[Optional] How long to wait for the query to complete, in milliseconds, before the request times out and returns. Note that this is only a timeout for the request, not the query. If the query takes longer to run than the timeout value, the call returns without any results and with the 'jobComplete' flag set to false. You can call GetQueryResults() to wait for the query to complete and read the results. The default value is 10000 milliseconds (10 seconds).",
+ "format": "uint32"
+ },
+ "useQueryCache": {
+ "type": "boolean",
+ "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. The default value is true.",
+ "default": "true"
+ }
+ }
+ },
+ "QueryResponse": {
+ "id": "QueryResponse",
+ "type": "object",
+ "properties": {
+ "cacheHit": {
+ "type": "boolean",
+ "description": "Whether the query result was fetched from the query cache."
+ },
+ "jobComplete": {
+ "type": "boolean",
+ "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available."
+ },
+ "jobReference": {
+ "$ref": "JobReference",
+ "description": "Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type.",
+ "default": "bigquery#queryResponse"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A token used for paging results."
+ },
+ "rows": {
+ "type": "array",
+ "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above.",
+ "items": {
+ "$ref": "TableRow"
+ }
+ },
+ "schema": {
+ "$ref": "TableSchema",
+ "description": "The schema of the results. Present only when the query completes successfully."
+ },
+ "totalBytesProcessed": {
+ "type": "string",
+ "description": "The total number of bytes processed for this query. If this query was a dry run, this is the number of bytes that would be processed if the query were run.",
+ "format": "int64"
+ },
+ "totalRows": {
+ "type": "string",
+ "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results.",
+ "format": "uint64"
+ }
+ }
+ },
+ "Table": {
+ "id": "Table",
+ "type": "object",
+ "properties": {
+ "creationTime": {
+ "type": "string",
+ "description": "[Output-only] The time when this table was created, in milliseconds since the epoch.",
+ "format": "int64"
+ },
+ "description": {
+ "type": "string",
+ "description": "[Optional] A user-friendly description of this table."
+ },
+ "etag": {
+ "type": "string",
+ "description": "[Output-only] A hash of this resource."
+ },
+ "expirationTime": {
+ "type": "string",
+ "description": "[Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.",
+ "format": "int64"
+ },
+ "friendlyName": {
+ "type": "string",
+ "description": "[Optional] A descriptive name for this table."
+ },
+ "id": {
+ "type": "string",
+ "description": "[Output-only] An opaque ID uniquely identifying the table."
+ },
+ "kind": {
+ "type": "string",
+ "description": "[Output-only] The type of the resource.",
+ "default": "bigquery#table"
+ },
+ "lastModifiedTime": {
+ "type": "string",
+ "description": "[Output-only] The time when this table was last modified, in milliseconds since the epoch.",
+ "format": "uint64"
+ },
+ "location": {
+ "type": "string",
+ "description": "[Optional] The backing storage location."
+ },
+ "numBytes": {
+ "type": "string",
+ "description": "[Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.",
+ "format": "int64"
+ },
+ "numRows": {
+ "type": "string",
+ "description": "[Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.",
+ "format": "uint64"
+ },
+ "schema": {
+ "$ref": "TableSchema",
+ "description": "[Optional] Describes the schema of this table."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "[Output-only] A URL that can be used to access this resource again."
+ },
+ "tableReference": {
+ "$ref": "TableReference",
+ "description": "[Required] Reference describing the ID of this table."
+ },
+ "type": {
+ "type": "string",
+ "description": "[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. The default value is TABLE."
+ },
+ "view": {
+ "$ref": "ViewDefinition",
+ "description": "[Optional] The view definition."
+ }
+ }
+ },
+ "TableCell": {
+ "id": "TableCell",
+ "type": "object",
+ "properties": {
+ "v": {
+ "type": "any"
+ }
+ }
+ },
+ "TableDataInsertAllRequest": {
+ "id": "TableDataInsertAllRequest",
+ "type": "object",
+ "properties": {
+ "ignoreUnknownValues": {
+ "type": "boolean",
+ "description": "[Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. Default is false, which treats unknown values as errors."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type of the response.",
+ "default": "bigquery#tableDataInsertAllRequest"
+ },
+ "rows": {
+ "type": "array",
+ "description": "The rows to insert.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "insertId": {
+ "type": "string",
+ "description": "[Optional] A unique ID for each row. BigQuery uses this property to detect duplicate insertion requests on a best-effort basis."
+ },
+ "json": {
+ "$ref": "JsonObject",
+ "description": "[Required] A JSON object that contains a row of data. The object's properties and values must match the destination table's schema."
+ }
+ }
+ }
+ },
+ "skipInvalidRows": {
+ "type": "boolean",
+ "description": "[Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist."
+ }
+ }
+ },
+ "TableDataInsertAllResponse": {
+ "id": "TableDataInsertAllResponse",
+ "type": "object",
+ "properties": {
+ "insertErrors": {
+ "type": "array",
+ "description": "An array of errors for rows that were not inserted.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "errors": {
+ "type": "array",
+ "description": "Error information for the row indicated by the index property.",
+ "items": {
+ "$ref": "ErrorProto"
+ }
+ },
+ "index": {
+ "type": "integer",
+ "description": "The index of the row that error applies to.",
+ "format": "uint32"
+ }
+ }
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type of the response.",
+ "default": "bigquery#tableDataInsertAllResponse"
+ }
+ }
+ },
+ "TableDataList": {
+ "id": "TableDataList",
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "A hash of this page of results."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type of the response.",
+ "default": "bigquery#tableDataList"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A token used for paging results. Providing this token instead of the startIndex parameter can help you retrieve stable results when an underlying table is changing."
+ },
+ "rows": {
+ "type": "array",
+ "description": "Rows of results.",
+ "items": {
+ "$ref": "TableRow"
+ }
+ },
+ "totalRows": {
+ "type": "string",
+ "description": "The total number of rows in the complete table.",
+ "format": "int64"
+ }
+ }
+ },
+ "TableFieldSchema": {
+ "id": "TableFieldSchema",
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string",
+ "description": "[Optional] The field description. The maximum length is 16K characters."
+ },
+ "fields": {
+ "type": "array",
+ "description": "[Optional] Describes the nested schema fields if the type property is set to RECORD.",
+ "items": {
+ "$ref": "TableFieldSchema"
+ }
+ },
+ "mode": {
+ "type": "string",
+ "description": "[Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE."
+ },
+ "name": {
+ "type": "string",
+ "description": "[Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters."
+ },
+ "type": {
+ "type": "string",
+ "description": "[Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema)."
+ }
+ }
+ },
+ "TableList": {
+ "id": "TableList",
+ "type": "object",
+ "properties": {
+ "etag": {
+ "type": "string",
+ "description": "A hash of this page of results."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The type of list.",
+ "default": "bigquery#tableList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token to request the next page of results."
+ },
+ "tables": {
+ "type": "array",
+ "description": "Tables in the requested dataset.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "friendlyName": {
+ "type": "string",
+ "description": "The user-friendly name for this table."
+ },
+ "id": {
+ "type": "string",
+ "description": "An opaque ID of the table"
+ },
+ "kind": {
+ "type": "string",
+ "description": "The resource type.",
+ "default": "bigquery#table"
+ },
+ "tableReference": {
+ "$ref": "TableReference",
+ "description": "A reference uniquely identifying the table."
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of table. Possible values are: TABLE, VIEW."
+ }
+ }
+ }
+ },
+ "totalItems": {
+ "type": "integer",
+ "description": "The total number of tables in the dataset.",
+ "format": "int32"
+ }
+ }
+ },
+ "TableReference": {
+ "id": "TableReference",
+ "type": "object",
+ "properties": {
+ "datasetId": {
+ "type": "string",
+ "description": "[Required] The ID of the dataset containing this table.",
+ "annotations": {
+ "required": [
+ "bigquery.tables.update"
+ ]
+ }
+ },
+ "projectId": {
+ "type": "string",
+ "description": "[Required] The ID of the project containing this table.",
+ "annotations": {
+ "required": [
+ "bigquery.tables.update"
+ ]
+ }
+ },
+ "tableId": {
+ "type": "string",
+ "description": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
+ "annotations": {
+ "required": [
+ "bigquery.tables.update"
+ ]
+ }
+ }
+ }
+ },
+ "TableRow": {
+ "id": "TableRow",
+ "type": "object",
+ "properties": {
+ "f": {
+ "type": "array",
+ "description": "Represents a single row in the result set, consisting of one or more fields.",
+ "items": {
+ "$ref": "TableCell"
+ }
+ }
+ }
+ },
+ "TableSchema": {
+ "id": "TableSchema",
+ "type": "object",
+ "properties": {
+ "fields": {
+ "type": "array",
+ "description": "Describes the fields in a table.",
+ "items": {
+ "$ref": "TableFieldSchema"
+ }
+ }
+ }
+ },
+ "UserDefinedFunctionResource": {
+ "id": "UserDefinedFunctionResource",
+ "type": "object",
+ "properties": {
+ "inlineCode": {
+ "type": "string",
+ "description": "[Pick one] An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code."
+ },
+ "resourceUri": {
+ "type": "string",
+ "description": "[Pick one] A code resource to load from a Google Cloud Storage URI (gs://bucket/path)."
+ }
+ }
+ },
+ "ViewDefinition": {
+ "id": "ViewDefinition",
+ "type": "object",
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "[Required] A query that BigQuery executes when the view is referenced."
+ }
+ }
+ }
+ },
+ "resources": {
+ "datasets": {
+ "methods": {
+ "delete": {
+ "id": "bigquery.datasets.delete",
+ "path": "projects/{projectId}/datasets/{datasetId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of dataset being deleted",
+ "required": true,
+ "location": "path"
+ },
+ "deleteContents": {
+ "type": "boolean",
+ "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False",
+ "location": "query"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the dataset being deleted",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "get": {
+ "id": "bigquery.datasets.get",
+ "path": "projects/{projectId}/datasets/{datasetId}",
+ "httpMethod": "GET",
+ "description": "Returns the dataset specified by datasetID.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the requested dataset",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the requested dataset",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId"
+ ],
+ "response": {
+ "$ref": "Dataset"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "insert": {
+ "id": "bigquery.datasets.insert",
+ "path": "projects/{projectId}/datasets",
+ "httpMethod": "POST",
+ "description": "Creates a new empty dataset.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the new dataset",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "request": {
+ "$ref": "Dataset"
+ },
+ "response": {
+ "$ref": "Dataset"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "list": {
+ "id": "bigquery.datasets.list",
+ "path": "projects/{projectId}/datasets",
+ "httpMethod": "GET",
+ "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.",
+ "parameters": {
+ "all": {
+ "type": "boolean",
+ "description": "Whether to list all datasets, including hidden ones",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "The maximum number of results to return",
+ "format": "uint32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Page token, returned by a previous call, to request the next page of results",
+ "location": "query"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the datasets to be listed",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "response": {
+ "$ref": "DatasetList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "patch": {
+ "id": "bigquery.datasets.patch",
+ "path": "projects/{projectId}/datasets/{datasetId}",
+ "httpMethod": "PATCH",
+ "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the dataset being updated",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the dataset being updated",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId"
+ ],
+ "request": {
+ "$ref": "Dataset"
+ },
+ "response": {
+ "$ref": "Dataset"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "update": {
+ "id": "bigquery.datasets.update",
+ "path": "projects/{projectId}/datasets/{datasetId}",
+ "httpMethod": "PUT",
+ "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the dataset being updated",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the dataset being updated",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId"
+ ],
+ "request": {
+ "$ref": "Dataset"
+ },
+ "response": {
+ "$ref": "Dataset"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ },
+ "jobs": {
+ "methods": {
+ "cancel": {
+ "id": "bigquery.jobs.cancel",
+ "path": "project/{projectId}/jobs/{jobId}/cancel",
+ "httpMethod": "POST",
+ "description": "Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully.",
+ "parameters": {
+ "jobId": {
+ "type": "string",
+ "description": "Job ID of the job to cancel",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the job to cancel",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "jobId"
+ ],
+ "response": {
+ "$ref": "JobCancelResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "get": {
+ "id": "bigquery.jobs.get",
+ "path": "projects/{projectId}/jobs/{jobId}",
+ "httpMethod": "GET",
+ "description": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.",
+ "parameters": {
+ "jobId": {
+ "type": "string",
+ "description": "Job ID of the requested job",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the requested job",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "jobId"
+ ],
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "getQueryResults": {
+ "id": "bigquery.jobs.getQueryResults",
+ "path": "projects/{projectId}/queries/{jobId}",
+ "httpMethod": "GET",
+ "description": "Retrieves the results of a query job.",
+ "parameters": {
+ "jobId": {
+ "type": "string",
+ "description": "Job ID of the query job",
+ "required": true,
+ "location": "path"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of results to read",
+ "format": "uint32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Page token, returned by a previous call, to request the next page of results",
+ "location": "query"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the query job",
+ "required": true,
+ "location": "path"
+ },
+ "startIndex": {
+ "type": "string",
+ "description": "Zero-based index of the starting row",
+ "format": "uint64",
+ "location": "query"
+ },
+ "timeoutMs": {
+ "type": "integer",
+ "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is 10 seconds. If the timeout passes before the job completes, the 'jobComplete' field in the response will be false",
+ "format": "uint32",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "jobId"
+ ],
+ "response": {
+ "$ref": "GetQueryResultsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "insert": {
+ "id": "bigquery.jobs.insert",
+ "path": "projects/{projectId}/jobs",
+ "httpMethod": "POST",
+ "description": "Starts a new asynchronous job. Requires the Can View project role.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the project that will be billed for the job",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "request": {
+ "$ref": "Job"
+ },
+ "response": {
+ "$ref": "Job"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaUpload": true,
+ "mediaUpload": {
+ "accept": [
+ "*/*"
+ ],
+ "protocols": {
+ "simple": {
+ "multipart": true,
+ "path": "/upload/bigquery/v2/projects/{projectId}/jobs"
+ },
+ "resumable": {
+ "multipart": true,
+ "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs"
+ }
+ }
+ }
+ },
+ "list": {
+ "id": "bigquery.jobs.list",
+ "path": "projects/{projectId}/jobs",
+ "httpMethod": "GET",
+ "description": "Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.",
+ "parameters": {
+ "allUsers": {
+ "type": "boolean",
+ "description": "Whether to display jobs owned by all users in the project. Default false",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of results to return",
+ "format": "uint32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Page token, returned by a previous call, to request the next page of results",
+ "location": "query"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the jobs to list",
+ "required": true,
+ "location": "path"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Restrict information returned to a set of selected fields",
+ "enum": [
+ "full",
+ "minimal"
+ ],
+ "enumDescriptions": [
+ "Includes all job data",
+ "Does not include the job configuration"
+ ],
+ "location": "query"
+ },
+ "stateFilter": {
+ "type": "string",
+ "description": "Filter for job state",
+ "enum": [
+ "done",
+ "pending",
+ "running"
+ ],
+ "enumDescriptions": [
+ "Finished jobs",
+ "Pending jobs",
+ "Running jobs"
+ ],
+ "repeated": true,
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "response": {
+ "$ref": "JobList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "query": {
+ "id": "bigquery.jobs.query",
+ "path": "projects/{projectId}/queries",
+ "httpMethod": "POST",
+ "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the project billed for the query",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "request": {
+ "$ref": "QueryRequest"
+ },
+ "response": {
+ "$ref": "QueryResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ },
+ "projects": {
+ "methods": {
+ "list": {
+ "id": "bigquery.projects.list",
+ "path": "projects",
+ "httpMethod": "GET",
+ "description": "Lists all projects to which you have been granted any project role.",
+ "parameters": {
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of results to return",
+ "format": "uint32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Page token, returned by a previous call, to request the next page of results",
+ "location": "query"
+ }
+ },
+ "response": {
+ "$ref": "ProjectList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ },
+ "tabledata": {
+ "methods": {
+ "insertAll": {
+ "id": "bigquery.tabledata.insertAll",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll",
+ "httpMethod": "POST",
+ "description": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the destination table.",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the destination table.",
+ "required": true,
+ "location": "path"
+ },
+ "tableId": {
+ "type": "string",
+ "description": "Table ID of the destination table.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId",
+ "tableId"
+ ],
+ "request": {
+ "$ref": "TableDataInsertAllRequest"
+ },
+ "response": {
+ "$ref": "TableDataInsertAllResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/bigquery.insertdata",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "list": {
+ "id": "bigquery.tabledata.list",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data",
+ "httpMethod": "GET",
+ "description": "Retrieves table data from a specified set of rows. Requires the READER dataset role.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the table to read",
+ "required": true,
+ "location": "path"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of results to return",
+ "format": "uint32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Page token, returned by a previous call, identifying the result set",
+ "location": "query"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the table to read",
+ "required": true,
+ "location": "path"
+ },
+ "startIndex": {
+ "type": "string",
+ "description": "Zero-based index of the starting row to read",
+ "format": "uint64",
+ "location": "query"
+ },
+ "tableId": {
+ "type": "string",
+ "description": "Table ID of the table to read",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId",
+ "tableId"
+ ],
+ "response": {
+ "$ref": "TableDataList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ },
+ "tables": {
+ "methods": {
+ "delete": {
+ "id": "bigquery.tables.delete",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the table to delete",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the table to delete",
+ "required": true,
+ "location": "path"
+ },
+ "tableId": {
+ "type": "string",
+ "description": "Table ID of the table to delete",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId",
+ "tableId"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "get": {
+ "id": "bigquery.tables.get",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ "httpMethod": "GET",
+ "description": "Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the requested table",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the requested table",
+ "required": true,
+ "location": "path"
+ },
+ "tableId": {
+ "type": "string",
+ "description": "Table ID of the requested table",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId",
+ "tableId"
+ ],
+ "response": {
+ "$ref": "Table"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "insert": {
+ "id": "bigquery.tables.insert",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables",
+ "httpMethod": "POST",
+ "description": "Creates a new, empty table in the dataset.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the new table",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the new table",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId"
+ ],
+ "request": {
+ "$ref": "Table"
+ },
+ "response": {
+ "$ref": "Table"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "list": {
+ "id": "bigquery.tables.list",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables",
+ "httpMethod": "GET",
+ "description": "Lists all tables in the specified dataset. Requires the READER dataset role.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the tables to list",
+ "required": true,
+ "location": "path"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of results to return",
+ "format": "uint32",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Page token, returned by a previous call, to request the next page of results",
+ "location": "query"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the tables to list",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId"
+ ],
+ "response": {
+ "$ref": "TableList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "patch": {
+ "id": "bigquery.tables.patch",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ "httpMethod": "PATCH",
+ "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the table to update",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the table to update",
+ "required": true,
+ "location": "path"
+ },
+ "tableId": {
+ "type": "string",
+ "description": "Table ID of the table to update",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId",
+ "tableId"
+ ],
+ "request": {
+ "$ref": "Table"
+ },
+ "response": {
+ "$ref": "Table"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "update": {
+ "id": "bigquery.tables.update",
+ "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ "httpMethod": "PUT",
+ "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.",
+ "parameters": {
+ "datasetId": {
+ "type": "string",
+ "description": "Dataset ID of the table to update",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "Project ID of the table to update",
+ "required": true,
+ "location": "path"
+ },
+ "tableId": {
+ "type": "string",
+ "description": "Table ID of the table to update",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "datasetId",
+ "tableId"
+ ],
+ "request": {
+ "$ref": "Table"
+ },
+ "response": {
+ "$ref": "Table"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ }
+ }
+}
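
The discovery document above is self-sufficient: jobs.query takes a QueryRequest and answers with a QueryResponse, so the API can be driven without the generated client that follows. The sketch below does exactly that with only the standard library; the bearer token is assumed to come from an OAuth2 flow elsewhere (for example golang.org/x/oauth2), and the project ID is a placeholder.

package bqexample

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// queryRequest and queryResponse mirror the QueryRequest and QueryResponse
// schemas above, keeping only the fields this sketch uses.
type queryRequest struct {
	Query      string `json:"query"`
	TimeoutMs  uint32 `json:"timeoutMs,omitempty"`
	MaxResults uint32 `json:"maxResults,omitempty"`
}

type queryResponse struct {
	JobComplete bool   `json:"jobComplete"`
	TotalRows   string `json:"totalRows"` // uint64 values travel as strings
	PageToken   string `json:"pageToken"`
}

// runQuery POSTs to projects/{projectId}/queries, the jobs.query path
// relative to the v2 base URL.
func runQuery(token, projectID, sql string) (*queryResponse, error) {
	body, err := json.Marshal(queryRequest{Query: sql, TimeoutMs: 10000, MaxResults: 1000})
	if err != nil {
		return nil, err
	}
	url := "https://www.googleapis.com/bigquery/v2/projects/" + projectID + "/queries"
	req, err := http.NewRequest("POST", url, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("jobs.query: %s", resp.Status)
	}
	var qr queryResponse
	if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil {
		return nil, err
	}
	// If timeoutMs elapsed before the query finished, JobComplete is false and
	// the caller should poll jobs.getQueryResults with the job reference.
	return &qr, nil
}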
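
The tabledata.insertAll method above follows the same pattern: POST a TableDataInsertAllRequest whose rows optionally carry an insertId, which BigQuery uses for best-effort deduplication on retries. Continuing the sketch (same package and imports; the table is hypothetical), this builds the request body:

// insertAllBody marshals a TableDataInsertAllRequest payload as described in
// the schema above; rows[i] is paired with ids[i] as its insertId.
func insertAllBody(rows []map[string]interface{}, ids []string) ([]byte, error) {
	type row struct {
		InsertId string                 `json:"insertId,omitempty"`
		Json     map[string]interface{} `json:"json"`
	}
	payload := struct {
		Kind string `json:"kind"`
		Rows []row  `json:"rows"`
	}{Kind: "bigquery#tableDataInsertAllRequest"}
	for i, r := range rows {
		payload.Rows = append(payload.Rows, row{InsertId: ids[i], Json: r})
	}
	return json.Marshal(payload)
}

The body is then POSTed to projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll, and any per-row failures come back in the insertErrors array of the TableDataInsertAllResponse.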
diff --git a/Godeps/_workspace/src/google.golang.org/api/bigquery/v2/bigquery-gen.go b/Godeps/_workspace/src/google.golang.org/api/bigquery/v2/bigquery-gen.go
new file mode 100644
index 0000000..3548149
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/bigquery/v2/bigquery-gen.go
@@ -0,0 +1,3656 @@
+// Package bigquery provides access to the BigQuery API.
+//
+// See https://cloud.google.com/bigquery/
+//
+// Usage example:
+//
+// import "google.golang.org/api/bigquery/v2"
+// ...
+// bigqueryService, err := bigquery.New(oauthHttpClient)
+package bigquery
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "golang.org/x/net/context"
+ "google.golang.org/api/googleapi"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+// Always reference these packages, just in case the auto-generated code
+// below doesn't.
+var _ = bytes.NewBuffer
+var _ = strconv.Itoa
+var _ = fmt.Sprintf
+var _ = json.NewDecoder
+var _ = io.Copy
+var _ = url.Parse
+var _ = googleapi.Version
+var _ = errors.New
+var _ = strings.Replace
+var _ = context.Background
+
+const apiId = "bigquery:v2"
+const apiName = "bigquery"
+const apiVersion = "v2"
+const basePath = "https://www.googleapis.com/bigquery/v2/"
+
+// OAuth2 scopes used by this API.
+const (
+ // View and manage your data in Google BigQuery
+ BigqueryScope = "https://www.googleapis.com/auth/bigquery"
+
+ // Insert data into Google BigQuery
+ BigqueryInsertdataScope = "https://www.googleapis.com/auth/bigquery.insertdata"
+
+ // View and manage your data across Google Cloud Platform services
+ CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
+
+ // Manage your data and permissions in Google Cloud Storage
+ DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
+
+ // View your data in Google Cloud Storage
+ DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
+
+ // Manage your data in Google Cloud Storage
+ DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
+)
+
+func New(client *http.Client) (*Service, error) {
+ if client == nil {
+ return nil, errors.New("client is nil")
+ }
+ s := &Service{client: client, BasePath: basePath}
+ s.Datasets = NewDatasetsService(s)
+ s.Jobs = NewJobsService(s)
+ s.Projects = NewProjectsService(s)
+ s.Tabledata = NewTabledataService(s)
+ s.Tables = NewTablesService(s)
+ return s, nil
+}
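+
+// A minimal wiring sketch (not generated code): it assumes application
+// default credentials from the separate golang.org/x/oauth2/google package.
+//
+//	httpClient, err := google.DefaultClient(oauth2.NoContext, bigquery.BigqueryScope)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	bq, err := bigquery.New(httpClient)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// bq.Datasets, bq.Jobs, bq.Tabledata and bq.Tables are now ready to use.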
+
+type Service struct {
+ client *http.Client
+ BasePath string // API endpoint base URL
+ UserAgent string // optional additional User-Agent fragment
+
+ Datasets *DatasetsService
+
+ Jobs *JobsService
+
+ Projects *ProjectsService
+
+ Tabledata *TabledataService
+
+ Tables *TablesService
+}
+
+func (s *Service) userAgent() string {
+ if s.UserAgent == "" {
+ return googleapi.UserAgent
+ }
+ return googleapi.UserAgent + " " + s.UserAgent
+}
+
+func NewDatasetsService(s *Service) *DatasetsService {
+ rs := &DatasetsService{s: s}
+ return rs
+}
+
+type DatasetsService struct {
+ s *Service
+}
+
+func NewJobsService(s *Service) *JobsService {
+ rs := &JobsService{s: s}
+ return rs
+}
+
+type JobsService struct {
+ s *Service
+}
+
+func NewProjectsService(s *Service) *ProjectsService {
+ rs := &ProjectsService{s: s}
+ return rs
+}
+
+type ProjectsService struct {
+ s *Service
+}
+
+func NewTabledataService(s *Service) *TabledataService {
+ rs := &TabledataService{s: s}
+ return rs
+}
+
+type TabledataService struct {
+ s *Service
+}
+
+func NewTablesService(s *Service) *TablesService {
+ rs := &TablesService{s: s}
+ return rs
+}
+
+type TablesService struct {
+ s *Service
+}
+
+type CsvOptions struct {
+ // AllowJaggedRows: [Optional] Indicates if BigQuery should accept rows
+ // that are missing trailing optional columns. If true, BigQuery treats
+ // missing trailing columns as null values. If false, records with
+ // missing trailing columns are treated as bad records, and if there are
+ // too many bad records, an invalid error is returned in the job result.
+ // The default value is false.
+ AllowJaggedRows bool `json:"allowJaggedRows,omitempty"`
+
+ // AllowQuotedNewlines: [Optional] Indicates if BigQuery should allow
+ // quoted data sections that contain newline characters in a CSV file.
+ // The default value is false.
+ AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"`
+
+ // Encoding: [Optional] The character encoding of the data. The
+ // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.
+ // BigQuery decodes the data after the raw, binary data has been split
+ // using the values of the quote and fieldDelimiter properties.
+ Encoding string `json:"encoding,omitempty"`
+
+ // FieldDelimiter: [Optional] The separator for fields in a CSV file.
+ // BigQuery converts the string to ISO-8859-1 encoding, and then uses
+ // the first byte of the encoded string to split the data in its raw,
+ // binary state. BigQuery also supports the escape sequence "\t" to
+ // specify a tab separator. The default value is a comma (',').
+ FieldDelimiter string `json:"fieldDelimiter,omitempty"`
+
+ // Quote: [Optional] The value that is used to quote data sections in a
+ // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and
+ // then uses the first byte of the encoded string to split the data in
+ // its raw, binary state. The default value is a double-quote ('"'). If
+ // your data does not contain quoted sections, set the property value to
+ // an empty string. If your data contains quoted newline characters, you
+ // must also set the allowQuotedNewlines property to true.
+ //
+ // Default: "
+ Quote *string `json:"quote,omitempty"`
+
+ // SkipLeadingRows: [Optional] The number of rows at the top of a CSV
+ // file that BigQuery will skip when reading the data. The default value
+ // is 0. This property is useful if you have header rows in the file
+ // that should be skipped.
+ SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"`
+}
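+
+// Quote is the one pointer field here because its schema default is a
+// double-quote rather than the empty string: a nil pointer means "use the
+// default", while a pointer to "" disables quoting entirely. A sketch with
+// hypothetical values:
+//
+//	noQuote := ""
+//	opts := &bigquery.CsvOptions{
+//		FieldDelimiter:  "\t",
+//		SkipLeadingRows: 1,
+//		Quote:           &noQuote, // explicitly no quoting, not the default '"'
+//	}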
+
+type Dataset struct {
+ // Access: [Optional] An array of objects that define dataset access for
+ // one or more entities. You can set this property when inserting or
+ // updating a dataset in order to control who is allowed to access the
+ // data. If unspecified at dataset creation time, BigQuery adds default
+ // dataset access for the following entities: access.specialGroup:
+ // projectReaders; access.role: READER; access.specialGroup:
+ // projectWriters; access.role: WRITER; access.specialGroup:
+ // projectOwners; access.role: OWNER; access.userByEmail: [dataset
+ // creator email]; access.role: OWNER;
+ Access []*DatasetAccess `json:"access,omitempty"`
+
+ // CreationTime: [Output-only] The time when this dataset was created,
+ // in milliseconds since the epoch.
+ CreationTime int64 `json:"creationTime,omitempty,string"`
+
+ // DatasetReference: [Required] A reference that identifies the dataset.
+ DatasetReference *DatasetReference `json:"datasetReference,omitempty"`
+
+ // DefaultTableExpirationMs: [Experimental] The default lifetime of all
+ // tables in the dataset, in milliseconds. The minimum value is 3600000
+ // milliseconds (one hour). Once this property is set, all newly-created
+ // tables in the dataset will have an expirationTime property set to the
+ // creation time plus the value in this property, and changing the value
+ // will only affect new tables, not existing ones. When the
+ // expirationTime for a given table is reached, that table will be
+ // deleted automatically. If a table's expirationTime is modified or
+ // removed before the table expires, or if you provide an explicit
+ // expirationTime when creating a table, that value takes precedence
+ // over the default expiration time indicated by this property.
+ DefaultTableExpirationMs int64 `json:"defaultTableExpirationMs,omitempty,string"`
+
+ // Description: [Optional] A user-friendly description of the dataset.
+ Description string `json:"description,omitempty"`
+
+ // Etag: [Output-only] A hash of the resource.
+ Etag string `json:"etag,omitempty"`
+
+ // FriendlyName: [Optional] A descriptive name for the dataset.
+ FriendlyName string `json:"friendlyName,omitempty"`
+
+ // Id: [Output-only] The fully-qualified unique name of the dataset in
+ // the format projectId:datasetId. The dataset name without the project
+ // name is given in the datasetId field. When creating a new dataset,
+ // leave this field blank, and instead specify the datasetId field.
+ Id string `json:"id,omitempty"`
+
+ // Kind: [Output-only] The resource type.
+ Kind string `json:"kind,omitempty"`
+
+ // LastModifiedTime: [Output-only] The date when this dataset or any of
+ // its tables was last modified, in milliseconds since the epoch.
+ LastModifiedTime int64 `json:"lastModifiedTime,omitempty,string"`
+
+ // Location: [Experimental] The location where the data resides. If not
+ // present, the data will be stored in the US.
+ Location string `json:"location,omitempty"`
+
+ // SelfLink: [Output-only] A URL that can be used to access the resource
+ // again. You can use this URL in Get or Update requests to the
+ // resource.
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+type DatasetAccess struct {
+ // Domain: [Pick one] A domain to grant access to. Any users signed in
+ // with the domain specified will be granted the specified access.
+ // Example: "example.com".
+ Domain string `json:"domain,omitempty"`
+
+ // GroupByEmail: [Pick one] An email address of a Google Group to grant
+ // access to.
+ GroupByEmail string `json:"groupByEmail,omitempty"`
+
+ // Role: [Required] Describes the rights granted to the user specified
+ // by the other member of the access object. The following string values
+ // are supported: READER, WRITER, OWNER.
+ Role string `json:"role,omitempty"`
+
+ // SpecialGroup: [Pick one] A special group to grant access to. Possible
+ // values include: projectOwners: Owners of the enclosing project.
+ // projectReaders: Readers of the enclosing project. projectWriters:
+ // Writers of the enclosing project. allAuthenticatedUsers: All
+ // authenticated BigQuery users.
+ SpecialGroup string `json:"specialGroup,omitempty"`
+
+ // UserByEmail: [Pick one] An email address of a user to grant access
+ // to. For example: fred@example.com.
+ UserByEmail string `json:"userByEmail,omitempty"`
+
+ // View: [Pick one] A view from a different dataset to grant access to.
+ // Queries executed against that view will have read access to tables in
+ // this dataset. The role field is not required when this field is set.
+ // If that view is updated by any user, access to the view needs to be
+ // granted again via an update operation.
+ View *TableReference `json:"view,omitempty"`
+}
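+
+// An illustrative access list with hypothetical principals. Each entry
+// sets exactly one of the [Pick one] fields; Role is omitted only for
+// the view grant, per the comment on View above.
+//
+//    access := []*DatasetAccess{
+//        {Role: "OWNER", UserByEmail: "fred@example.com"},
+//        {Role: "READER", SpecialGroup: "projectReaders"},
+//        {View: &TableReference{
+//            ProjectId: "other-project",
+//            DatasetId: "shared_views",
+//            TableId:   "authorized_view",
+//        }},
+//    }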
+
+type DatasetList struct {
+ // Datasets: An array of the dataset resources in the project. Each
+ // resource contains basic information. For full information about a
+ // particular dataset resource, use the Datasets: get method. This
+ // property is omitted when there are no datasets in the project.
+ Datasets []*DatasetListDatasets `json:"datasets,omitempty"`
+
+ // Etag: A hash value of the results page. You can use this property to
+ // determine if the page has changed since the last request.
+ Etag string `json:"etag,omitempty"`
+
+ // Kind: The list type. This property always returns the value
+ // "bigquery#datasetList".
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token that can be used to request the next results
+ // page. This property is omitted on the final results page.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+}
+
+type DatasetListDatasets struct {
+ // DatasetReference: The dataset reference. Use this property to access
+ // specific parts of the dataset's ID, such as project ID or dataset ID.
+ DatasetReference *DatasetReference `json:"datasetReference,omitempty"`
+
+ // FriendlyName: A descriptive name for the dataset, if one exists.
+ FriendlyName string `json:"friendlyName,omitempty"`
+
+ // Id: The fully-qualified, unique, opaque ID of the dataset.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The resource type. This property always returns the value
+ // "bigquery#dataset".
+ Kind string `json:"kind,omitempty"`
+}
+
+type DatasetReference struct {
+ // DatasetId: [Required] A unique ID for this dataset, without the
+ // project name. The ID must contain only letters (a-z, A-Z), numbers
+ // (0-9), or underscores (_). The maximum length is 1,024 characters.
+ DatasetId string `json:"datasetId,omitempty"`
+
+ // ProjectId: [Optional] The ID of the project containing this dataset.
+ ProjectId string `json:"projectId,omitempty"`
+}
+
+type ErrorProto struct {
+ // DebugInfo: Debugging information. This property is internal to Google
+ // and should not be used.
+ DebugInfo string `json:"debugInfo,omitempty"`
+
+ // Location: Specifies where the error occurred, if present.
+ Location string `json:"location,omitempty"`
+
+ // Message: A human-readable description of the error.
+ Message string `json:"message,omitempty"`
+
+ // Reason: A short error code that summarizes the error.
+ Reason string `json:"reason,omitempty"`
+}
+
+type ExternalDataConfiguration struct {
+ // Compression: [Optional] The compression type of the data source.
+ // Possible values include GZIP and NONE. The default value is NONE.
+ Compression string `json:"compression,omitempty"`
+
+ // CsvOptions: Additional properties to set if sourceFormat is set to
+ // CSV.
+ CsvOptions *CsvOptions `json:"csvOptions,omitempty"`
+
+ // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow
+ // extra values that are not represented in the table schema. If true,
+ // the extra values are ignored. If false, records with extra columns
+ // are treated as bad records, and if there are too many bad records, an
+ // invalid error is returned in the job result. The default value is
+ // false. The sourceFormat property determines what BigQuery treats as
+ // an extra value: CSV: Trailing columns
+ IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`
+
+ // MaxBadRecords: [Optional] The maximum number of bad records that
+ // BigQuery can ignore when reading data. If the number of bad records
+ // exceeds this value, an invalid error is returned in the job result.
+ // The default value is 0, which requires that all records are valid.
+ MaxBadRecords int64 `json:"maxBadRecords,omitempty"`
+
+ // Schema: [Required] The schema for the data.
+ Schema *TableSchema `json:"schema,omitempty"`
+
+ // SourceFormat: [Optional] The data format. External data sources must
+ // be in CSV format. The default value is CSV.
+ SourceFormat string `json:"sourceFormat,omitempty"`
+
+ // SourceUris: [Required] The fully-qualified URIs that point to your
+ // data in Google Cloud Storage. Each URI can contain one '*' wildcard
+ // character and it must come after the 'bucket' name. CSV limits
+ // related to load jobs apply to external data sources, plus an
+ // additional limit of 10 GB maximum size across all URIs.
+ SourceUris []string `json:"sourceUris,omitempty"`
+}
+
+type GetQueryResultsResponse struct {
+ // CacheHit: Whether the query result was fetched from the query cache.
+ CacheHit bool `json:"cacheHit,omitempty"`
+
+ // Etag: A hash of this response.
+ Etag string `json:"etag,omitempty"`
+
+ // JobComplete: Whether the query has completed or not. If rows or
+ // totalRows are present, this will always be true. If this is false,
+ // totalRows will not be available.
+ JobComplete bool `json:"jobComplete,omitempty"`
+
+ // JobReference: Reference to the BigQuery Job that was created to run
+ // the query. This field will be present even if the original request
+ // timed out, in which case GetQueryResults can be used to read the
+ // results once the query has completed. Since this API only returns the
+ // first page of results, subsequent pages can be fetched via the same
+ // mechanism (GetQueryResults).
+ JobReference *JobReference `json:"jobReference,omitempty"`
+
+ // Kind: The resource type of the response.
+ Kind string `json:"kind,omitempty"`
+
+ // PageToken: A token used for paging results.
+ PageToken string `json:"pageToken,omitempty"`
+
+ // Rows: An object with as many results as can be contained within the
+ // maximum permitted reply size. To get any additional rows, you can
+ // call GetQueryResults and specify the jobReference returned above.
+ // Present only when the query completes successfully.
+ Rows []*TableRow `json:"rows,omitempty"`
+
+ // Schema: The schema of the results. Present only when the query
+ // completes successfully.
+ Schema *TableSchema `json:"schema,omitempty"`
+
+ // TotalBytesProcessed: The total number of bytes processed for this
+ // query.
+ TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
+
+ // TotalRows: The total number of rows in the complete query result set,
+ // which can be more than the number of rows in this single page of
+ // results. Present only when the query completes successfully.
+ TotalRows uint64 `json:"totalRows,omitempty,string"`
+}
+
+type Job struct {
+ // Configuration: [Required] Describes the job configuration.
+ Configuration *JobConfiguration `json:"configuration,omitempty"`
+
+ // Etag: [Output-only] A hash of this resource.
+ Etag string `json:"etag,omitempty"`
+
+	// Id: [Output-only] Opaque ID field of the job.
+ Id string `json:"id,omitempty"`
+
+ // JobReference: [Optional] Reference describing the unique-per-user
+ // name of the job.
+ JobReference *JobReference `json:"jobReference,omitempty"`
+
+ // Kind: [Output-only] The type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // SelfLink: [Output-only] A URL that can be used to access this
+ // resource again.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Statistics: [Output-only] Information about the job, including
+ // starting time and ending time of the job.
+ Statistics *JobStatistics `json:"statistics,omitempty"`
+
+ // Status: [Output-only] The status of this job. Examine this value when
+ // polling an asynchronous job to see if the job is complete.
+ Status *JobStatus `json:"status,omitempty"`
+
+ // UserEmail: [Output-only] Email address of the user who ran the job.
+ UserEmail string `json:"user_email,omitempty"`
+}
+
+type JobCancelResponse struct {
+ // Job: The final state of the job.
+ Job *Job `json:"job,omitempty"`
+
+ // Kind: The resource type of the response.
+ Kind string `json:"kind,omitempty"`
+}
+
+type JobConfiguration struct {
+ // Copy: [Pick one] Copies a table.
+ Copy *JobConfigurationTableCopy `json:"copy,omitempty"`
+
+ // DryRun: [Optional] If set, don't actually run this job. A valid query
+ // will return a mostly empty response with some processing statistics,
+ // while an invalid query will return the same error it would if it
+ // wasn't a dry run. Behavior of non-query jobs is undefined.
+ DryRun bool `json:"dryRun,omitempty"`
+
+ // Extract: [Pick one] Configures an extract job.
+ Extract *JobConfigurationExtract `json:"extract,omitempty"`
+
+ // Link: [Pick one] Configures a link job.
+ Link *JobConfigurationLink `json:"link,omitempty"`
+
+ // Load: [Pick one] Configures a load job.
+ Load *JobConfigurationLoad `json:"load,omitempty"`
+
+ // Query: [Pick one] Configures a query job.
+ Query *JobConfigurationQuery `json:"query,omitempty"`
+}
+
+type JobConfigurationExtract struct {
+ // Compression: [Optional] The compression type to use for exported
+ // files. Possible values include GZIP and NONE. The default value is
+ // NONE.
+ Compression string `json:"compression,omitempty"`
+
+ // DestinationFormat: [Optional] The exported file format. Possible
+ // values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default
+ // value is CSV. Tables with nested or repeated fields cannot be
+ // exported as CSV.
+ DestinationFormat string `json:"destinationFormat,omitempty"`
+
+ // DestinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
+ // passing only one URI as necessary. The fully-qualified Google Cloud
+ // Storage URI where the extracted table should be written.
+ DestinationUri string `json:"destinationUri,omitempty"`
+
+ // DestinationUris: [Pick one] A list of fully-qualified Google Cloud
+ // Storage URIs where the extracted table should be written.
+ DestinationUris []string `json:"destinationUris,omitempty"`
+
+ // FieldDelimiter: [Optional] Delimiter to use between fields in the
+ // exported data. Default is ','
+ FieldDelimiter string `json:"fieldDelimiter,omitempty"`
+
+ // PrintHeader: [Optional] Whether to print out a header row in the
+ // results. Default is true.
+ //
+ // Default: true
+ PrintHeader *bool `json:"printHeader,omitempty"`
+
+ // SourceTable: [Required] A reference to the table being exported.
+ SourceTable *TableReference `json:"sourceTable,omitempty"`
+}
+
+type JobConfigurationLink struct {
+ // CreateDisposition: [Optional] Specifies whether the job is allowed to
+ // create new tables. The following values are supported:
+ // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+ // table. CREATE_NEVER: The table must already exist. If it does not, a
+ // 'notFound' error is returned in the job result. The default value is
+ // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
+ // one atomic update upon job completion.
+ CreateDisposition string `json:"createDisposition,omitempty"`
+
+ // DestinationTable: [Required] The destination table of the link job.
+ DestinationTable *TableReference `json:"destinationTable,omitempty"`
+
+ // SourceUri: [Required] URI of source table to link.
+ SourceUri []string `json:"sourceUri,omitempty"`
+
+ // WriteDisposition: [Optional] Specifies the action that occurs if the
+ // destination table already exists. The following values are supported:
+ // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+ // table data. WRITE_APPEND: If the table already exists, BigQuery
+ // appends the data to the table. WRITE_EMPTY: If the table already
+ // exists and contains data, a 'duplicate' error is returned in the job
+ // result. The default value is WRITE_EMPTY. Each action is atomic and
+ // only occurs if BigQuery is able to complete the job successfully.
+ // Creation, truncation and append actions occur as one atomic update
+ // upon job completion.
+ WriteDisposition string `json:"writeDisposition,omitempty"`
+}
+
+type JobConfigurationLoad struct {
+ // AllowJaggedRows: [Optional] Accept rows that are missing trailing
+ // optional columns. The missing values are treated as nulls. If false,
+ // records with missing trailing columns are treated as bad records, and
+ // if there are too many bad records, an invalid error is returned in
+ // the job result. The default value is false. Only applicable to CSV,
+ // ignored for other formats.
+ AllowJaggedRows bool `json:"allowJaggedRows,omitempty"`
+
+ // AllowQuotedNewlines: Indicates if BigQuery should allow quoted data
+ // sections that contain newline characters in a CSV file. The default
+ // value is false.
+ AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"`
+
+ // CreateDisposition: [Optional] Specifies whether the job is allowed to
+ // create new tables. The following values are supported:
+ // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+ // table. CREATE_NEVER: The table must already exist. If it does not, a
+ // 'notFound' error is returned in the job result. The default value is
+ // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
+ // one atomic update upon job completion.
+ CreateDisposition string `json:"createDisposition,omitempty"`
+
+ // DestinationTable: [Required] The destination table to load the data
+ // into.
+ DestinationTable *TableReference `json:"destinationTable,omitempty"`
+
+ // Encoding: [Optional] The character encoding of the data. The
+ // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.
+ // BigQuery decodes the data after the raw, binary data has been split
+ // using the values of the quote and fieldDelimiter properties.
+ Encoding string `json:"encoding,omitempty"`
+
+ // FieldDelimiter: [Optional] The separator for fields in a CSV file.
+ // BigQuery converts the string to ISO-8859-1 encoding, and then uses
+ // the first byte of the encoded string to split the data in its raw,
+ // binary state. BigQuery also supports the escape sequence "\t" to
+ // specify a tab separator. The default value is a comma (',').
+ FieldDelimiter string `json:"fieldDelimiter,omitempty"`
+
+ // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow
+ // extra values that are not represented in the table schema. If true,
+ // the extra values are ignored. If false, records with extra columns
+ // are treated as bad records, and if there are too many bad records, an
+ // invalid error is returned in the job result. The default value is
+ // false. The sourceFormat property determines what BigQuery treats as
+ // an extra value: CSV: Trailing columns JSON: Named values that don't
+ // match any column names
+ IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`
+
+ // MaxBadRecords: [Optional] The maximum number of bad records that
+ // BigQuery can ignore when running the job. If the number of bad
+ // records exceeds this value, an invalid error is returned in the job
+ // result. The default value is 0, which requires that all records are
+ // valid.
+ MaxBadRecords int64 `json:"maxBadRecords,omitempty"`
+
+ // ProjectionFields: [Experimental] If sourceFormat is set to
+ // "DATASTORE_BACKUP", indicates which entity properties to load into
+ // BigQuery from a Cloud Datastore backup. Property names are case
+ // sensitive and must be top-level properties. If no properties are
+ // specified, BigQuery loads all properties. If any named property isn't
+ // found in the Cloud Datastore backup, an invalid error is returned in
+ // the job result.
+ ProjectionFields []string `json:"projectionFields,omitempty"`
+
+ // Quote: [Optional] The value that is used to quote data sections in a
+ // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and
+ // then uses the first byte of the encoded string to split the data in
+ // its raw, binary state. The default value is a double-quote ('"'). If
+ // your data does not contain quoted sections, set the property value to
+ // an empty string. If your data contains quoted newline characters, you
+ // must also set the allowQuotedNewlines property to true.
+ //
+ // Default: "
+ Quote *string `json:"quote,omitempty"`
+
+ // Schema: [Optional] The schema for the destination table. The schema
+ // can be omitted if the destination table already exists or if the
+ // schema can be inferred from the loaded data.
+ Schema *TableSchema `json:"schema,omitempty"`
+
+ // SchemaInline: [Deprecated] The inline schema. For CSV schemas,
+ // specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING,
+ // bar:INTEGER, baz:FLOAT".
+ SchemaInline string `json:"schemaInline,omitempty"`
+
+ // SchemaInlineFormat: [Deprecated] The format of the schemaInline
+ // property.
+ SchemaInlineFormat string `json:"schemaInlineFormat,omitempty"`
+
+ // SkipLeadingRows: [Optional] The number of rows at the top of a CSV
+ // file that BigQuery will skip when loading the data. The default value
+ // is 0. This property is useful if you have header rows in the file
+ // that should be skipped.
+ SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"`
+
+ // SourceFormat: [Optional] The format of the data files. For CSV files,
+ // specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
+ // newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default
+ // value is CSV.
+ SourceFormat string `json:"sourceFormat,omitempty"`
+
+ // SourceUris: [Required] The fully-qualified URIs that point to your
+ // data in Google Cloud Storage. Each URI can contain one '*' wildcard
+ // character and it must come after the 'bucket' name.
+ SourceUris []string `json:"sourceUris,omitempty"`
+
+ // WriteDisposition: [Optional] Specifies the action that occurs if the
+ // destination table already exists. The following values are supported:
+ // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+ // table data. WRITE_APPEND: If the table already exists, BigQuery
+ // appends the data to the table. WRITE_EMPTY: If the table already
+ // exists and contains data, a 'duplicate' error is returned in the job
+ // result. The default value is WRITE_APPEND. Each action is atomic and
+ // only occurs if BigQuery is able to complete the job successfully.
+ // Creation, truncation and append actions occur as one atomic update
+ // upon job completion.
+ WriteDisposition string `json:"writeDisposition,omitempty"`
+}
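+
+// A hedged sketch of a CSV load configuration, using placeholder
+// project/dataset/table IDs and a hypothetical bucket. Note the '*'
+// wildcard after the bucket name, as the SourceUris comment requires.
+//
+//    load := &JobConfigurationLoad{
+//        DestinationTable: &TableReference{
+//            ProjectId: "my-project",
+//            DatasetId: "my_dataset",
+//            TableId:   "events",
+//        },
+//        SourceUris:       []string{"gs://my-bucket/events-*.csv"},
+//        SourceFormat:     "CSV",
+//        SkipLeadingRows:  1,
+//        WriteDisposition: "WRITE_TRUNCATE",
+//    }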
+
+type JobConfigurationQuery struct {
+ // AllowLargeResults: If true, allows the query to produce arbitrarily
+ // large result tables at a slight cost in performance. Requires
+ // destinationTable to be set.
+ AllowLargeResults bool `json:"allowLargeResults,omitempty"`
+
+ // CreateDisposition: [Optional] Specifies whether the job is allowed to
+ // create new tables. The following values are supported:
+ // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+ // table. CREATE_NEVER: The table must already exist. If it does not, a
+ // 'notFound' error is returned in the job result. The default value is
+ // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
+ // one atomic update upon job completion.
+ CreateDisposition string `json:"createDisposition,omitempty"`
+
+ // DefaultDataset: [Optional] Specifies the default dataset to use for
+ // unqualified table names in the query.
+ DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"`
+
+ // DestinationTable: [Optional] Describes the table where the query
+ // results should be stored. If not present, a new table will be created
+ // to store the results.
+ DestinationTable *TableReference `json:"destinationTable,omitempty"`
+
+ // FlattenResults: [Optional] Flattens all nested and repeated fields in
+ // the query results. The default value is true. allowLargeResults must
+ // be true if this is set to false.
+ //
+ // Default: true
+ FlattenResults *bool `json:"flattenResults,omitempty"`
+
+ // PreserveNulls: [Deprecated] This property is deprecated.
+ PreserveNulls bool `json:"preserveNulls,omitempty"`
+
+ // Priority: [Optional] Specifies a priority for the query. Possible
+ // values include INTERACTIVE and BATCH. The default value is
+ // INTERACTIVE.
+ Priority string `json:"priority,omitempty"`
+
+ // Query: [Required] BigQuery SQL query to execute.
+ Query string `json:"query,omitempty"`
+
+ // TableDefinitions: [Experimental] If querying an external data source
+ // outside of BigQuery, describes the data format, location and other
+ // properties of the data source. By defining these properties, the data
+ // source can then be queried as if it were a standard BigQuery table.
+ TableDefinitions map[string]ExternalDataConfiguration `json:"tableDefinitions,omitempty"`
+
+ // UseQueryCache: [Optional] Whether to look for the result in the query
+ // cache. The query cache is a best-effort cache that will be flushed
+ // whenever tables in the query are modified. Moreover, the query cache
+ // is only available when a query does not have a destination table
+ // specified. The default value is true.
+ //
+ // Default: true
+ UseQueryCache *bool `json:"useQueryCache,omitempty"`
+
+ // UserDefinedFunctionResources: [Experimental] Describes user-defined
+ // function resources used in the query.
+ UserDefinedFunctionResources []*UserDefinedFunctionResource `json:"userDefinedFunctionResources,omitempty"`
+
+ // WriteDisposition: [Optional] Specifies the action that occurs if the
+ // destination table already exists. The following values are supported:
+ // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+ // table data. WRITE_APPEND: If the table already exists, BigQuery
+ // appends the data to the table. WRITE_EMPTY: If the table already
+ // exists and contains data, a 'duplicate' error is returned in the job
+ // result. The default value is WRITE_EMPTY. Each action is atomic and
+ // only occurs if BigQuery is able to complete the job successfully.
+ // Creation, truncation and append actions occur as one atomic update
+ // upon job completion.
+ WriteDisposition string `json:"writeDisposition,omitempty"`
+}
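+
+// A sketch combining the settings that interact above: disabling
+// FlattenResults (a *bool, so false must be passed by address)
+// requires AllowLargeResults, which in turn requires a
+// destinationTable. The query string is the example quoted in
+// QueryRequest below; the table IDs are placeholders.
+//
+//    flatten := false
+//    q := &JobConfigurationQuery{
+//        Query: "SELECT count(f1) FROM [myProjectId:myDatasetId.myTableId]",
+//        DestinationTable: &TableReference{
+//            ProjectId: "myProjectId",
+//            DatasetId: "myDatasetId",
+//            TableId:   "counts",
+//        },
+//        AllowLargeResults: true,
+//        FlattenResults:    &flatten,
+//    }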
+
+type JobConfigurationTableCopy struct {
+ // CreateDisposition: [Optional] Specifies whether the job is allowed to
+ // create new tables. The following values are supported:
+ // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+ // table. CREATE_NEVER: The table must already exist. If it does not, a
+ // 'notFound' error is returned in the job result. The default value is
+ // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
+ // one atomic update upon job completion.
+ CreateDisposition string `json:"createDisposition,omitempty"`
+
+	// DestinationTable: [Required] The destination table.
+ DestinationTable *TableReference `json:"destinationTable,omitempty"`
+
+ // SourceTable: [Pick one] Source table to copy.
+ SourceTable *TableReference `json:"sourceTable,omitempty"`
+
+ // SourceTables: [Pick one] Source tables to copy.
+ SourceTables []*TableReference `json:"sourceTables,omitempty"`
+
+ // WriteDisposition: [Optional] Specifies the action that occurs if the
+ // destination table already exists. The following values are supported:
+ // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+ // table data. WRITE_APPEND: If the table already exists, BigQuery
+ // appends the data to the table. WRITE_EMPTY: If the table already
+ // exists and contains data, a 'duplicate' error is returned in the job
+ // result. The default value is WRITE_EMPTY. Each action is atomic and
+ // only occurs if BigQuery is able to complete the job successfully.
+ // Creation, truncation and append actions occur as one atomic update
+ // upon job completion.
+ WriteDisposition string `json:"writeDisposition,omitempty"`
+}
+
+type JobList struct {
+ // Etag: A hash of this page of results.
+ Etag string `json:"etag,omitempty"`
+
+ // Jobs: List of jobs that were requested.
+ Jobs []*JobListJobs `json:"jobs,omitempty"`
+
+ // Kind: The resource type of the response.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token to request the next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+}
+
+type JobListJobs struct {
+ // Configuration: [Full-projection-only] Specifies the job
+ // configuration.
+ Configuration *JobConfiguration `json:"configuration,omitempty"`
+
+ // ErrorResult: A result object that will be present only if the job has
+ // failed.
+ ErrorResult *ErrorProto `json:"errorResult,omitempty"`
+
+ // Id: Unique opaque ID of the job.
+ Id string `json:"id,omitempty"`
+
+ // JobReference: Job reference uniquely identifying the job.
+ JobReference *JobReference `json:"jobReference,omitempty"`
+
+ // Kind: The resource type.
+ Kind string `json:"kind,omitempty"`
+
+ // State: Running state of the job. When the state is DONE, errorResult
+ // can be checked to determine whether the job succeeded or failed.
+ State string `json:"state,omitempty"`
+
+ // Statistics: [Output-only] Information about the job, including
+ // starting time and ending time of the job.
+ Statistics *JobStatistics `json:"statistics,omitempty"`
+
+ // Status: [Full-projection-only] Describes the state of the job.
+ Status *JobStatus `json:"status,omitempty"`
+
+ // UserEmail: [Full-projection-only] Email address of the user who ran
+ // the job.
+ UserEmail string `json:"user_email,omitempty"`
+}
+
+type JobReference struct {
+ // JobId: [Required] The ID of the job. The ID must contain only letters
+ // (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The
+ // maximum length is 1,024 characters.
+ JobId string `json:"jobId,omitempty"`
+
+ // ProjectId: [Required] The ID of the project containing this job.
+ ProjectId string `json:"projectId,omitempty"`
+}
+
+type JobStatistics struct {
+ // CreationTime: [Output-only] Creation time of this job, in
+ // milliseconds since the epoch. This field will be present on all jobs.
+ CreationTime int64 `json:"creationTime,omitempty,string"`
+
+ // EndTime: [Output-only] End time of this job, in milliseconds since
+ // the epoch. This field will be present whenever a job is in the DONE
+ // state.
+ EndTime int64 `json:"endTime,omitempty,string"`
+
+ // Extract: [Output-only] Statistics for an extract job.
+ Extract *JobStatistics4 `json:"extract,omitempty"`
+
+ // Load: [Output-only] Statistics for a load job.
+ Load *JobStatistics3 `json:"load,omitempty"`
+
+ // Query: [Output-only] Statistics for a query job.
+ Query *JobStatistics2 `json:"query,omitempty"`
+
+ // StartTime: [Output-only] Start time of this job, in milliseconds
+ // since the epoch. This field will be present when the job transitions
+ // from the PENDING state to either RUNNING or DONE.
+ StartTime int64 `json:"startTime,omitempty,string"`
+
+ // TotalBytesProcessed: [Output-only] [Deprecated] Use the bytes
+ // processed in the query statistics instead.
+ TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
+}
+
+type JobStatistics2 struct {
+ // CacheHit: [Output-only] Whether the query result was fetched from the
+ // query cache.
+ CacheHit bool `json:"cacheHit,omitempty"`
+
+ // TotalBytesProcessed: [Output-only] Total bytes processed for this
+ // job.
+ TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
+}
+
+type JobStatistics3 struct {
+ // InputFileBytes: [Output-only] Number of bytes of source data in a
+	// load job.
+ InputFileBytes int64 `json:"inputFileBytes,omitempty,string"`
+
+ // InputFiles: [Output-only] Number of source files in a load job.
+ InputFiles int64 `json:"inputFiles,omitempty,string"`
+
+ // OutputBytes: [Output-only] Size of the loaded data in bytes. Note
+ // that while an import job is in the running state, this value may
+ // change.
+ OutputBytes int64 `json:"outputBytes,omitempty,string"`
+
+ // OutputRows: [Output-only] Number of rows imported in a load job. Note
+ // that while an import job is in the running state, this value may
+ // change.
+ OutputRows int64 `json:"outputRows,omitempty,string"`
+}
+
+type JobStatistics4 struct {
+ // DestinationUriFileCounts: [Experimental] Number of files per
+ // destination URI or URI pattern specified in the extract
+ // configuration. These values will be in the same order as the URIs
+ // specified in the 'destinationUris' field.
+ DestinationUriFileCounts googleapi.Int64s `json:"destinationUriFileCounts,omitempty"`
+}
+
+type JobStatus struct {
+ // ErrorResult: [Output-only] Final error result of the job. If present,
+ // indicates that the job has completed and was unsuccessful.
+ ErrorResult *ErrorProto `json:"errorResult,omitempty"`
+
+ // Errors: [Output-only] All errors encountered during the running of
+ // the job. Errors here do not necessarily mean that the job has
+ // completed or was unsuccessful.
+ Errors []*ErrorProto `json:"errors,omitempty"`
+
+ // State: [Output-only] Running state of the job.
+ State string `json:"state,omitempty"`
+}
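+
+// A sketch of the polling check described on Job.Status above, assuming
+// job is a *Job (with Status populated) fetched elsewhere and that the
+// caller imports fmt:
+//
+//    switch job.Status.State {
+//    case "DONE":
+//        if res := job.Status.ErrorResult; res != nil {
+//            return fmt.Errorf("job failed: %s (%s)", res.Message, res.Reason)
+//        }
+//        // success
+//    case "PENDING", "RUNNING":
+//        // not finished; poll again later
+//    }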
+
+type JsonValue interface{}
+
+type ProjectList struct {
+	// Etag: A hash of the page of results.
+ Etag string `json:"etag,omitempty"`
+
+ // Kind: The type of list.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token to request the next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // Projects: Projects to which you have at least READ access.
+ Projects []*ProjectListProjects `json:"projects,omitempty"`
+
+ // TotalItems: The total number of projects in the list.
+ TotalItems int64 `json:"totalItems,omitempty"`
+}
+
+type ProjectListProjects struct {
+ // FriendlyName: A descriptive name for this project.
+ FriendlyName string `json:"friendlyName,omitempty"`
+
+ // Id: An opaque ID of this project.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The resource type.
+ Kind string `json:"kind,omitempty"`
+
+ // NumericId: The numeric ID of this project.
+ NumericId uint64 `json:"numericId,omitempty,string"`
+
+ // ProjectReference: A unique reference to this project.
+ ProjectReference *ProjectReference `json:"projectReference,omitempty"`
+}
+
+type ProjectReference struct {
+ // ProjectId: [Required] ID of the project. Can be either the numeric ID
+ // or the assigned ID of the project.
+ ProjectId string `json:"projectId,omitempty"`
+}
+
+type QueryRequest struct {
+ // DefaultDataset: [Optional] Specifies the default datasetId and
+ // projectId to assume for any unqualified table names in the query. If
+ // not set, all table names in the query string must be qualified in the
+ // format 'datasetId.tableId'.
+ DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"`
+
+ // DryRun: [Optional] If set, don't actually run this job. A valid query
+ // will return a mostly empty response with some processing statistics,
+ // while an invalid query will return the same error it would if it
+ // wasn't a dry run.
+ DryRun bool `json:"dryRun,omitempty"`
+
+ // Kind: The resource type of the request.
+ Kind string `json:"kind,omitempty"`
+
+ // MaxResults: [Optional] The maximum number of rows of data to return
+ // per page of results. Setting this flag to a small value such as 1000
+ // and then paging through results might improve reliability when the
+ // query result set is large. In addition to this limit, responses are
+ // also limited to 10 MB. By default, there is no maximum row count, and
+ // only the byte limit applies.
+ MaxResults int64 `json:"maxResults,omitempty"`
+
+ // PreserveNulls: [Deprecated] This property is deprecated.
+ PreserveNulls bool `json:"preserveNulls,omitempty"`
+
+ // Query: [Required] A query string, following the BigQuery query
+ // syntax, of the query to execute. Example: "SELECT count(f1) FROM
+ // [myProjectId:myDatasetId.myTableId]".
+ Query string `json:"query,omitempty"`
+
+ // TimeoutMs: [Optional] How long to wait for the query to complete, in
+ // milliseconds, before the request times out and returns. Note that
+ // this is only a timeout for the request, not the query. If the query
+ // takes longer to run than the timeout value, the call returns without
+ // any results and with the 'jobComplete' flag set to false. You can
+ // call GetQueryResults() to wait for the query to complete and read the
+ // results. The default value is 10000 milliseconds (10 seconds).
+ TimeoutMs int64 `json:"timeoutMs,omitempty"`
+
+ // UseQueryCache: [Optional] Whether to look for the result in the query
+ // cache. The query cache is a best-effort cache that will be flushed
+ // whenever tables in the query are modified. The default value is true.
+ //
+ // Default: true
+ UseQueryCache *bool `json:"useQueryCache,omitempty"`
+}
+
+type QueryResponse struct {
+ // CacheHit: Whether the query result was fetched from the query cache.
+ CacheHit bool `json:"cacheHit,omitempty"`
+
+ // JobComplete: Whether the query has completed or not. If rows or
+ // totalRows are present, this will always be true. If this is false,
+ // totalRows will not be available.
+ JobComplete bool `json:"jobComplete,omitempty"`
+
+ // JobReference: Reference to the Job that was created to run the query.
+ // This field will be present even if the original request timed out, in
+ // which case GetQueryResults can be used to read the results once the
+ // query has completed. Since this API only returns the first page of
+ // results, subsequent pages can be fetched via the same mechanism
+ // (GetQueryResults).
+ JobReference *JobReference `json:"jobReference,omitempty"`
+
+ // Kind: The resource type.
+ Kind string `json:"kind,omitempty"`
+
+ // PageToken: A token used for paging results.
+ PageToken string `json:"pageToken,omitempty"`
+
+ // Rows: An object with as many results as can be contained within the
+ // maximum permitted reply size. To get any additional rows, you can
+ // call GetQueryResults and specify the jobReference returned above.
+ Rows []*TableRow `json:"rows,omitempty"`
+
+ // Schema: The schema of the results. Present only when the query
+ // completes successfully.
+ Schema *TableSchema `json:"schema,omitempty"`
+
+ // TotalBytesProcessed: The total number of bytes processed for this
+ // query. If this query was a dry run, this is the number of bytes that
+ // would be processed if the query were run.
+ TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
+
+ // TotalRows: The total number of rows in the complete query result set,
+ // which can be more than the number of rows in this single page of
+ // results.
+ TotalRows uint64 `json:"totalRows,omitempty,string"`
+}
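+
+// A sketch of handling an incomplete synchronous query, assuming svc is
+// a *Service and the Jobs.Query / Jobs.GetQueryResults methods
+// generated later in this file. If the request's TimeoutMs elapses
+// before the query finishes, JobComplete is false and the rows must be
+// fetched through GetQueryResults using the returned JobReference.
+//
+//    resp, err := svc.Jobs.Query("my-project", &QueryRequest{
+//        Query:     "SELECT count(f1) FROM [myProjectId:myDatasetId.myTableId]",
+//        TimeoutMs: 10000,
+//    }).Do()
+//    if err != nil {
+//        return err
+//    }
+//    if !resp.JobComplete {
+//        // poll svc.Jobs.GetQueryResults(resp.JobReference.ProjectId,
+//        // resp.JobReference.JobId) until jobComplete is true
+//    }
+//    for _, row := range resp.Rows {
+//        for _, cell := range row.F {
+//            _ = cell.V // values arrive as untyped JSON scalars
+//        }
+//    }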
+
+type Table struct {
+ // CreationTime: [Output-only] The time when this table was created, in
+ // milliseconds since the epoch.
+ CreationTime int64 `json:"creationTime,omitempty,string"`
+
+ // Description: [Optional] A user-friendly description of this table.
+ Description string `json:"description,omitempty"`
+
+ // Etag: [Output-only] A hash of this resource.
+ Etag string `json:"etag,omitempty"`
+
+ // ExpirationTime: [Optional] The time when this table expires, in
+ // milliseconds since the epoch. If not present, the table will persist
+ // indefinitely. Expired tables will be deleted and their storage
+ // reclaimed.
+ ExpirationTime int64 `json:"expirationTime,omitempty,string"`
+
+ // FriendlyName: [Optional] A descriptive name for this table.
+ FriendlyName string `json:"friendlyName,omitempty"`
+
+ // Id: [Output-only] An opaque ID uniquely identifying the table.
+ Id string `json:"id,omitempty"`
+
+ // Kind: [Output-only] The type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // LastModifiedTime: [Output-only] The time when this table was last
+ // modified, in milliseconds since the epoch.
+ LastModifiedTime uint64 `json:"lastModifiedTime,omitempty,string"`
+
+ // Location: [Optional] The backing storage location.
+ Location string `json:"location,omitempty"`
+
+ // NumBytes: [Output-only] The size of the table in bytes. This property
+ // is unavailable for tables that are actively receiving streaming
+ // inserts.
+ NumBytes int64 `json:"numBytes,omitempty,string"`
+
+ // NumRows: [Output-only] The number of rows of data in this table. This
+ // property is unavailable for tables that are actively receiving
+ // streaming inserts.
+ NumRows uint64 `json:"numRows,omitempty,string"`
+
+ // Schema: [Optional] Describes the schema of this table.
+ Schema *TableSchema `json:"schema,omitempty"`
+
+ // SelfLink: [Output-only] A URL that can be used to access this
+ // resource again.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // TableReference: [Required] Reference describing the ID of this table.
+ TableReference *TableReference `json:"tableReference,omitempty"`
+
+ // Type: [Output-only] Describes the table type. The following values
+ // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table
+ // defined by a SQL query. The default value is TABLE.
+ Type string `json:"type,omitempty"`
+
+ // View: [Optional] The view definition.
+ View *ViewDefinition `json:"view,omitempty"`
+}
+
+type TableCell struct {
+ V interface{} `json:"v,omitempty"`
+}
+
+type TableDataInsertAllRequest struct {
+ // IgnoreUnknownValues: [Optional] Accept rows that contain values that
+ // do not match the schema. The unknown values are ignored. Default is
+ // false, which treats unknown values as errors.
+ IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`
+
+	// Kind: The resource type of the request.
+ Kind string `json:"kind,omitempty"`
+
+ // Rows: The rows to insert.
+ Rows []*TableDataInsertAllRequestRows `json:"rows,omitempty"`
+
+ // SkipInvalidRows: [Optional] Insert all valid rows of a request, even
+ // if invalid rows exist. The default value is false, which causes the
+ // entire request to fail if any invalid rows exist.
+ SkipInvalidRows bool `json:"skipInvalidRows,omitempty"`
+}
+
+type TableDataInsertAllRequestRows struct {
+ // InsertId: [Optional] A unique ID for each row. BigQuery uses this
+ // property to detect duplicate insertion requests on a best-effort
+ // basis.
+ InsertId string `json:"insertId,omitempty"`
+
+ // Json: [Required] A JSON object that contains a row of data. The
+ // object's properties and values must match the destination table's
+ // schema.
+ Json map[string]JsonValue `json:"json,omitempty"`
+}
+
+type TableDataInsertAllResponse struct {
+ // InsertErrors: An array of errors for rows that were not inserted.
+ InsertErrors []*TableDataInsertAllResponseInsertErrors `json:"insertErrors,omitempty"`
+
+ // Kind: The resource type of the response.
+ Kind string `json:"kind,omitempty"`
+}
+
+type TableDataInsertAllResponseInsertErrors struct {
+ // Errors: Error information for the row indicated by the index
+ // property.
+ Errors []*ErrorProto `json:"errors,omitempty"`
+
+	// Index: The index of the row that the error applies to.
+ Index int64 `json:"index,omitempty"`
+}
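+
+// A sketch of a streaming-insert payload and the per-row error check,
+// assuming the Tabledata.InsertAll call generated later in this file
+// and a hypothetical two-column destination schema:
+//
+//    req := &TableDataInsertAllRequest{
+//        Rows: []*TableDataInsertAllRequestRows{{
+//            InsertId: "row-0001", // best-effort dedup key
+//            Json:     map[string]JsonValue{"name": "alice", "age": 30},
+//        }},
+//    }
+//    resp, err := svc.Tabledata.InsertAll(
+//        "my-project", "my_dataset", "events", req).Do()
+//    if err != nil {
+//        return err
+//    }
+//    for _, ie := range resp.InsertErrors {
+//        _ = ie // ie.Index is the failed row; ie.Errors has the details
+//    }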
+
+type TableDataList struct {
+ // Etag: A hash of this page of results.
+ Etag string `json:"etag,omitempty"`
+
+ // Kind: The resource type of the response.
+ Kind string `json:"kind,omitempty"`
+
+ // PageToken: A token used for paging results. Providing this token
+ // instead of the startIndex parameter can help you retrieve stable
+ // results when an underlying table is changing.
+ PageToken string `json:"pageToken,omitempty"`
+
+ // Rows: Rows of results.
+ Rows []*TableRow `json:"rows,omitempty"`
+
+ // TotalRows: The total number of rows in the complete table.
+ TotalRows int64 `json:"totalRows,omitempty,string"`
+}
+
+type TableFieldSchema struct {
+ // Description: [Optional] The field description. The maximum length is
+ // 16K characters.
+ Description string `json:"description,omitempty"`
+
+ // Fields: [Optional] Describes the nested schema fields if the type
+ // property is set to RECORD.
+ Fields []*TableFieldSchema `json:"fields,omitempty"`
+
+ // Mode: [Optional] The field mode. Possible values include NULLABLE,
+ // REQUIRED and REPEATED. The default value is NULLABLE.
+ Mode string `json:"mode,omitempty"`
+
+ // Name: [Required] The field name. The name must contain only letters
+ // (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a
+ // letter or underscore. The maximum length is 128 characters.
+ Name string `json:"name,omitempty"`
+
+ // Type: [Required] The field data type. Possible values include STRING,
+ // INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates
+ // that the field contains a nested schema).
+ Type string `json:"type,omitempty"`
+}
+
+type TableList struct {
+ // Etag: A hash of this page of results.
+ Etag string `json:"etag,omitempty"`
+
+ // Kind: The type of list.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token to request the next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // Tables: Tables in the requested dataset.
+ Tables []*TableListTables `json:"tables,omitempty"`
+
+ // TotalItems: The total number of tables in the dataset.
+ TotalItems int64 `json:"totalItems,omitempty"`
+}
+
+type TableListTables struct {
+ // FriendlyName: The user-friendly name for this table.
+ FriendlyName string `json:"friendlyName,omitempty"`
+
+	// Id: An opaque ID of the table.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The resource type.
+ Kind string `json:"kind,omitempty"`
+
+ // TableReference: A reference uniquely identifying the table.
+ TableReference *TableReference `json:"tableReference,omitempty"`
+
+ // Type: The type of table. Possible values are: TABLE, VIEW.
+ Type string `json:"type,omitempty"`
+}
+
+type TableReference struct {
+ // DatasetId: [Required] The ID of the dataset containing this table.
+ DatasetId string `json:"datasetId,omitempty"`
+
+ // ProjectId: [Required] The ID of the project containing this table.
+ ProjectId string `json:"projectId,omitempty"`
+
+ // TableId: [Required] The ID of the table. The ID must contain only
+ // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum
+ // length is 1,024 characters.
+ TableId string `json:"tableId,omitempty"`
+}
+
+type TableRow struct {
+ // F: Represents a single row in the result set, consisting of one or
+ // more fields.
+ F []*TableCell `json:"f,omitempty"`
+}
+
+type TableSchema struct {
+ // Fields: Describes the fields in a table.
+ Fields []*TableFieldSchema `json:"fields,omitempty"`
+}
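+
+// A sketch of a schema with one nested, repeated RECORD field, using
+// hypothetical field names:
+//
+//    schema := &TableSchema{Fields: []*TableFieldSchema{
+//        {Name: "name", Type: "STRING", Mode: "REQUIRED"},
+//        {Name: "addresses", Type: "RECORD", Mode: "REPEATED",
+//            Fields: []*TableFieldSchema{
+//                {Name: "city", Type: "STRING"},
+//                {Name: "zip", Type: "STRING"},
+//            }},
+//    }}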
+
+type UserDefinedFunctionResource struct {
+ // InlineCode: [Pick one] An inline resource that contains code for a
+	// user-defined function (UDF). Providing an inline code resource is
+ // equivalent to providing a URI for a file containing the same code.
+ InlineCode string `json:"inlineCode,omitempty"`
+
+ // ResourceUri: [Pick one] A code resource to load from a Google Cloud
+ // Storage URI (gs://bucket/path).
+ ResourceUri string `json:"resourceUri,omitempty"`
+}
+
+type ViewDefinition struct {
+ // Query: [Required] A query that BigQuery executes when the view is
+ // referenced.
+ Query string `json:"query,omitempty"`
+}
+
+// method id "bigquery.datasets.delete":
+
+type DatasetsDeleteCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the dataset specified by the datasetId value. Before
+// you can delete a dataset, you must delete all its tables, either
+// manually or by specifying deleteContents. Immediately after deletion,
+// you can create another dataset with the same name.
+func (r *DatasetsService) Delete(projectId string, datasetId string) *DatasetsDeleteCall {
+ c := &DatasetsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ return c
+}
+
+// DeleteContents sets the optional parameter "deleteContents": If True,
+// delete all the tables in the dataset. If False and the dataset
+// contains tables, the request will fail. Default is False
+func (c *DatasetsDeleteCall) DeleteContents(deleteContents bool) *DatasetsDeleteCall {
+ c.opt_["deleteContents"] = deleteContents
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsDeleteCall) Fields(s ...googleapi.Field) *DatasetsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *DatasetsDeleteCall) Do() error {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["deleteContents"]; ok {
+ params.Set("deleteContents", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.",
+ // "httpMethod": "DELETE",
+ // "id": "bigquery.datasets.delete",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of dataset being deleted",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "deleteContents": {
+ // "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False",
+ // "location": "query",
+ // "type": "boolean"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the dataset being deleted",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
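+
+// An illustrative call, assuming svc is a *Service from this package's
+// New constructor and that the IDs are placeholders; DeleteContents(true)
+// removes a non-empty dataset in one request:
+//
+//    err := svc.Datasets.Delete("my-project", "my_dataset").
+//        DeleteContents(true).
+//        Do()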
+
+// method id "bigquery.datasets.get":
+
+type DatasetsGetCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the dataset specified by datasetID.
+func (r *DatasetsService) Get(projectId string, datasetId string) *DatasetsGetCall {
+ c := &DatasetsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsGetCall) Fields(s ...googleapi.Field) *DatasetsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *DatasetsGetCall) Do() (*Dataset, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Dataset
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the dataset specified by datasetID.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.datasets.get",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the requested dataset",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the requested dataset",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}",
+ // "response": {
+ // "$ref": "Dataset"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
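+
+// An illustrative call with a partial response, using placeholder IDs;
+// Fields restricts the returned Dataset to the named properties:
+//
+//    ds, err := svc.Datasets.Get("my-project", "my_dataset").
+//        Fields("datasetReference", "access").
+//        Do()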
+
+// method id "bigquery.datasets.insert":
+
+type DatasetsInsertCall struct {
+ s *Service
+ projectId string
+ dataset *Dataset
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a new empty dataset.
+func (r *DatasetsService) Insert(projectId string, dataset *Dataset) *DatasetsInsertCall {
+ c := &DatasetsInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.dataset = dataset
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsInsertCall) Fields(s ...googleapi.Field) *DatasetsInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *DatasetsInsertCall) Do() (*Dataset, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Dataset
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a new empty dataset.",
+ // "httpMethod": "POST",
+ // "id": "bigquery.datasets.insert",
+ // "parameterOrder": [
+ // "projectId"
+ // ],
+ // "parameters": {
+ // "projectId": {
+ // "description": "Project ID of the new dataset",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets",
+ // "request": {
+ // "$ref": "Dataset"
+ // },
+ // "response": {
+ // "$ref": "Dataset"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
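+
+// An illustrative call with placeholder IDs; per the comment on
+// DatasetReference, ProjectId is optional there, since the project is
+// already named in the call:
+//
+//    created, err := svc.Datasets.Insert("my-project", &Dataset{
+//        DatasetReference: &DatasetReference{DatasetId: "my_dataset"},
+//        FriendlyName:     "My dataset",
+//    }).Do()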
+
+// method id "bigquery.datasets.list":
+
+type DatasetsListCall struct {
+ s *Service
+ projectId string
+ opt_ map[string]interface{}
+}
+
+// List: Lists all datasets in the specified project to which you have
+// been granted the READER dataset role.
+func (r *DatasetsService) List(projectId string) *DatasetsListCall {
+ c := &DatasetsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ return c
+}
+
+// All sets the optional parameter "all": Whether to list all datasets,
+// including hidden ones
+func (c *DatasetsListCall) All(all bool) *DatasetsListCall {
+ c.opt_["all"] = all
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results to return
+func (c *DatasetsListCall) MaxResults(maxResults int64) *DatasetsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, to request the next page of results
+func (c *DatasetsListCall) PageToken(pageToken string) *DatasetsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsListCall) Fields(s ...googleapi.Field) *DatasetsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *DatasetsListCall) Do() (*DatasetList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["all"]; ok {
+ params.Set("all", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *DatasetList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.datasets.list",
+ // "parameterOrder": [
+ // "projectId"
+ // ],
+ // "parameters": {
+ // "all": {
+ // "description": "Whether to list all datasets, including hidden ones",
+ // "location": "query",
+ // "type": "boolean"
+ // },
+ // "maxResults": {
+ // "description": "The maximum number of results to return",
+ // "format": "uint32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Page token, returned by a previous call, to request the next page of results",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the datasets to be listed",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets",
+ // "response": {
+ // "$ref": "DatasetList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
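+
+// Example (editor's sketch, not generated code): paging through every
+// dataset in a project. The helper name listAllDatasetPages and the
+// page size of 100 are illustrative assumptions; the PageToken/Do
+// round trip is the pattern the call builder above provides, and
+// DatasetList.NextPageToken is empty on the final page.
+func listAllDatasetPages(svc *Service, projectID string) ([]*DatasetList, error) {
+ var pages []*DatasetList
+ token := ""
+ for {
+  call := svc.Datasets.List(projectID).MaxResults(100)
+  if token != "" {
+   call = call.PageToken(token)
+  }
+  page, err := call.Do()
+  if err != nil {
+   return nil, err
+  }
+  pages = append(pages, page)
+  if page.NextPageToken == "" {
+   return pages, nil
+  }
+  token = page.NextPageToken
+ }
+}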
+
+// method id "bigquery.datasets.patch":
+
+type DatasetsPatchCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ dataset *Dataset
+ opt_ map[string]interface{}
+}
+
+// Patch: Updates information in an existing dataset. The update method
+// replaces the entire dataset resource, whereas the patch method only
+// replaces fields that are provided in the submitted dataset resource.
+// This method supports patch semantics.
+func (r *DatasetsService) Patch(projectId string, datasetId string, dataset *Dataset) *DatasetsPatchCall {
+ c := &DatasetsPatchCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.dataset = dataset
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsPatchCall) Fields(s ...googleapi.Field) *DatasetsPatchCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *DatasetsPatchCall) Do() (*Dataset, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Dataset
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "bigquery.datasets.patch",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the dataset being updated",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the dataset being updated",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}",
+ // "request": {
+ // "$ref": "Dataset"
+ // },
+ // "response": {
+ // "$ref": "Dataset"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "bigquery.datasets.update":
+
+type DatasetsUpdateCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ dataset *Dataset
+ opt_ map[string]interface{}
+}
+
+// Update: Updates information in an existing dataset. The update method
+// replaces the entire dataset resource, whereas the patch method only
+// replaces fields that are provided in the submitted dataset resource.
+func (r *DatasetsService) Update(projectId string, datasetId string, dataset *Dataset) *DatasetsUpdateCall {
+ c := &DatasetsUpdateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.dataset = dataset
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DatasetsUpdateCall) Fields(s ...googleapi.Field) *DatasetsUpdateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *DatasetsUpdateCall) Do() (*Dataset, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Dataset
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.",
+ // "httpMethod": "PUT",
+ // "id": "bigquery.datasets.update",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the dataset being updated",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the dataset being updated",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}",
+ // "request": {
+ // "$ref": "Dataset"
+ // },
+ // "response": {
+ // "$ref": "Dataset"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
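+
+// Example (editor's sketch): choosing Patch over Update. Because patch
+// semantics only replace the fields present in the submitted resource,
+// changing one attribute does not require sending the whole dataset.
+// The helper name and the Description field usage are illustrative.
+func setDatasetDescription(svc *Service, projectID, datasetID, description string) (*Dataset, error) {
+ return svc.Datasets.Patch(projectID, datasetID, &Dataset{Description: description}).Do()
+}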
+
+// method id "bigquery.jobs.cancel":
+
+type JobsCancelCall struct {
+ s *Service
+ projectId string
+ jobId string
+ opt_ map[string]interface{}
+}
+
+// Cancel: Requests that a job be cancelled. This call will return
+// immediately, and the client will need to poll for the job status to
+// see if the cancel completed successfully.
+func (r *JobsService) Cancel(projectId string, jobId string) *JobsCancelCall {
+ c := &JobsCancelCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.jobId = jobId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *JobsCancelCall) Fields(s ...googleapi.Field) *JobsCancelCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *JobsCancelCall) Do() (*JobCancelResponse, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "project/{projectId}/jobs/{jobId}/cancel")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "jobId": c.jobId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *JobCancelResponse
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully.",
+ // "httpMethod": "POST",
+ // "id": "bigquery.jobs.cancel",
+ // "parameterOrder": [
+ // "projectId",
+ // "jobId"
+ // ],
+ // "parameters": {
+ // "jobId": {
+ // "description": "Job ID of the job to cancel",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the job to cancel",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "project/{projectId}/jobs/{jobId}/cancel",
+ // "response": {
+ // "$ref": "JobCancelResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
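+
+// Example (editor's sketch): because Cancel returns immediately, a
+// caller follows it with bigquery.jobs.get and repeats that read until
+// the job's state reaches DONE. The single post-cancel read below and
+// the helper name are illustrative; a real poller would add backoff.
+func cancelAndCheck(svc *Service, projectID, jobID string) (string, error) {
+ if _, err := svc.Jobs.Cancel(projectID, jobID).Do(); err != nil {
+  return "", err
+ }
+ job, err := svc.Jobs.Get(projectID, jobID).Do()
+ if err != nil {
+  return "", err
+ }
+ if job.Status == nil {
+  return "", fmt.Errorf("bigquery: job %v has no status", jobID)
+ }
+ return job.Status.State, nil
+}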
+
+// method id "bigquery.jobs.get":
+
+type JobsGetCall struct {
+ s *Service
+ projectId string
+ jobId string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns information about a specific job. Job information is
+// available for a six-month period after creation. Requires that you're
+// the person who ran the job, or have the Is Owner project role.
+func (r *JobsService) Get(projectId string, jobId string) *JobsGetCall {
+ c := &JobsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.jobId = jobId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *JobsGetCall) Fields(s ...googleapi.Field) *JobsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *JobsGetCall) Do() (*Job, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs/{jobId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "jobId": c.jobId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Job
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.jobs.get",
+ // "parameterOrder": [
+ // "projectId",
+ // "jobId"
+ // ],
+ // "parameters": {
+ // "jobId": {
+ // "description": "Job ID of the requested job",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the requested job",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/jobs/{jobId}",
+ // "response": {
+ // "$ref": "Job"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "bigquery.jobs.getQueryResults":
+
+type JobsGetQueryResultsCall struct {
+ s *Service
+ projectId string
+ jobId string
+ opt_ map[string]interface{}
+}
+
+// GetQueryResults: Retrieves the results of a query job.
+func (r *JobsService) GetQueryResults(projectId string, jobId string) *JobsGetQueryResultsCall {
+ c := &JobsGetQueryResultsCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.jobId = jobId
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to read
+func (c *JobsGetQueryResultsCall) MaxResults(maxResults int64) *JobsGetQueryResultsCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, to request the next page of results
+func (c *JobsGetQueryResultsCall) PageToken(pageToken string) *JobsGetQueryResultsCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// StartIndex sets the optional parameter "startIndex": Zero-based index
+// of the starting row
+func (c *JobsGetQueryResultsCall) StartIndex(startIndex uint64) *JobsGetQueryResultsCall {
+ c.opt_["startIndex"] = startIndex
+ return c
+}
+
+// TimeoutMs sets the optional parameter "timeoutMs": How long to wait
+// for the query to complete, in milliseconds, before returning. Default
+// is 10 seconds. If the timeout passes before the job completes, the
+// 'jobComplete' field in the response will be false
+func (c *JobsGetQueryResultsCall) TimeoutMs(timeoutMs int64) *JobsGetQueryResultsCall {
+ c.opt_["timeoutMs"] = timeoutMs
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *JobsGetQueryResultsCall) Fields(s ...googleapi.Field) *JobsGetQueryResultsCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *JobsGetQueryResultsCall) Do() (*GetQueryResultsResponse, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["startIndex"]; ok {
+ params.Set("startIndex", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["timeoutMs"]; ok {
+ params.Set("timeoutMs", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries/{jobId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "jobId": c.jobId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *GetQueryResultsResponse
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the results of a query job.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.jobs.getQueryResults",
+ // "parameterOrder": [
+ // "projectId",
+ // "jobId"
+ // ],
+ // "parameters": {
+ // "jobId": {
+ // "description": "Job ID of the query job",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "description": "Maximum number of results to read",
+ // "format": "uint32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Page token, returned by a previous call, to request the next page of results",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the query job",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "startIndex": {
+ // "description": "Zero-based index of the starting row",
+ // "format": "uint64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "timeoutMs": {
+ // "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is 10 seconds. If the timeout passes before the job completes, the 'jobComplete' field in the response will be false",
+ // "format": "uint32",
+ // "location": "query",
+ // "type": "integer"
+ // }
+ // },
+ // "path": "projects/{projectId}/queries/{jobId}",
+ // "response": {
+ // "$ref": "GetQueryResultsResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
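+
+// Example (editor's sketch): TimeoutMs bounds how long the server
+// waits, so a response can arrive with JobComplete still false. The
+// loop below simply re-issues the call until the job finishes; the
+// server-side wait makes client-side sleeping unnecessary. The helper
+// name and the 10-second timeout are illustrative assumptions.
+func waitForQueryResults(svc *Service, projectID, jobID string) (*GetQueryResultsResponse, error) {
+ for {
+  res, err := svc.Jobs.GetQueryResults(projectID, jobID).TimeoutMs(10000).Do()
+  if err != nil {
+   return nil, err
+  }
+  if res.JobComplete {
+   return res, nil
+  }
+ }
+}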
+
+// method id "bigquery.jobs.insert":
+
+type JobsInsertCall struct {
+ s *Service
+ projectId string
+ job *Job
+ opt_ map[string]interface{}
+ media_ io.Reader
+ resumable_ googleapi.SizeReaderAt
+ mediaType_ string
+ ctx_ context.Context
+ protocol_ string
+}
+
+// Insert: Starts a new asynchronous job. Requires the Can View project
+// role.
+func (r *JobsService) Insert(projectId string, job *Job) *JobsInsertCall {
+ c := &JobsInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.job = job
+ return c
+}
+
+// Media specifies the media to upload in a single chunk.
+// At most one of Media and ResumableMedia may be set.
+func (c *JobsInsertCall) Media(r io.Reader) *JobsInsertCall {
+ c.media_ = r
+ c.protocol_ = "multipart"
+ return c
+}
+
+// ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx.
+// At most one of Media and ResumableMedia may be set.
+// mediaType identifies the MIME media type of the upload, such as "image/png".
+// If mediaType is "", it will be auto-detected.
+func (c *JobsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *JobsInsertCall {
+ c.ctx_ = ctx
+ c.resumable_ = io.NewSectionReader(r, 0, size)
+ c.mediaType_ = mediaType
+ c.protocol_ = "resumable"
+ return c
+}
+
+// ProgressUpdater provides a callback function that will be called after every chunk.
+// It should be a low-latency function in order to not slow down the upload operation.
+// This should only be called when using ResumableMedia (as opposed to Media).
+func (c *JobsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *JobsInsertCall {
+ c.opt_["progressUpdater"] = pu
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *JobsInsertCall) Fields(s ...googleapi.Field) *JobsInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *JobsInsertCall) Do() (*Job, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.job)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs")
+ var progressUpdater_ googleapi.ProgressUpdater
+ if v, ok := c.opt_["progressUpdater"]; ok {
+ if pu, ok := v.(googleapi.ProgressUpdater); ok {
+ progressUpdater_ = pu
+ }
+ }
+ if c.media_ != nil || c.resumable_ != nil {
+ urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
+ params.Set("uploadType", c.protocol_)
+ }
+ urls += "?" + params.Encode()
+ if c.protocol_ != "resumable" {
+ var cancel func()
+ cancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype)
+ if cancel != nil {
+ defer cancel()
+ }
+ }
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ if c.protocol_ == "resumable" {
+ if c.mediaType_ == "" {
+ c.mediaType_ = googleapi.DetectMediaType(c.resumable_)
+ }
+ req.Header.Set("X-Upload-Content-Type", c.mediaType_)
+ req.Header.Set("Content-Type", "application/json; charset=utf-8")
+ } else {
+ req.Header.Set("Content-Type", ctype)
+ }
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ if c.protocol_ == "resumable" {
+ loc := res.Header.Get("Location")
+ rx := &googleapi.ResumableUpload{
+ Client: c.s.client,
+ UserAgent: c.s.userAgent(),
+ URI: loc,
+ Media: c.resumable_,
+ MediaType: c.mediaType_,
+ ContentLength: c.resumable_.Size(),
+ Callback: progressUpdater_,
+ }
+ res, err = rx.Upload(c.ctx_)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ }
+ var ret *Job
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Starts a new asynchronous job. Requires the Can View project role.",
+ // "httpMethod": "POST",
+ // "id": "bigquery.jobs.insert",
+ // "mediaUpload": {
+ // "accept": [
+ // "*/*"
+ // ],
+ // "protocols": {
+ // "resumable": {
+ // "multipart": true,
+ // "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs"
+ // },
+ // "simple": {
+ // "multipart": true,
+ // "path": "/upload/bigquery/v2/projects/{projectId}/jobs"
+ // }
+ // }
+ // },
+ // "parameterOrder": [
+ // "projectId"
+ // ],
+ // "parameters": {
+ // "projectId": {
+ // "description": "Project ID of the project that will be billed for the job",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/jobs",
+ // "request": {
+ // "$ref": "Job"
+ // },
+ // "response": {
+ // "$ref": "Job"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ],
+ // "supportsMediaUpload": true
+ // }
+
+}
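+
+// Example (editor's sketch): a load job that ships its source bytes
+// through the single-chunk Media path. The CSV configuration is an
+// illustrative assumption; for large inputs that need cancellation,
+// ResumableMedia with a context would replace Media here.
+func loadCSV(svc *Service, projectID, datasetID, tableID string, src io.Reader) (*Job, error) {
+ job := &Job{
+  Configuration: &JobConfiguration{
+   Load: &JobConfigurationLoad{
+    SourceFormat: "CSV",
+    DestinationTable: &TableReference{
+     ProjectId: projectID,
+     DatasetId: datasetID,
+     TableId: tableID,
+    },
+   },
+  },
+ }
+ return svc.Jobs.Insert(projectID, job).Media(src).Do()
+}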
+
+// method id "bigquery.jobs.list":
+
+type JobsListCall struct {
+ s *Service
+ projectId string
+ opt_ map[string]interface{}
+}
+
+// List: Lists all jobs that you started in the specified project. The
+// job list returns in reverse chronological order of when the jobs were
+// created, starting with the most recent job created. Requires the Can
+// View project role, or the Is Owner project role if you set the
+// allUsers property.
+func (r *JobsService) List(projectId string) *JobsListCall {
+ c := &JobsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ return c
+}
+
+// AllUsers sets the optional parameter "allUsers": Whether to display
+// jobs owned by all users in the project. Default false
+func (c *JobsListCall) AllUsers(allUsers bool) *JobsListCall {
+ c.opt_["allUsers"] = allUsers
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return
+func (c *JobsListCall) MaxResults(maxResults int64) *JobsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, to request the next page of results
+func (c *JobsListCall) PageToken(pageToken string) *JobsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Projection sets the optional parameter "projection": Restrict
+// information returned to a set of selected fields
+//
+// Possible values:
+// "full" - Includes all job data
+// "minimal" - Does not include the job configuration
+func (c *JobsListCall) Projection(projection string) *JobsListCall {
+ c.opt_["projection"] = projection
+ return c
+}
+
+// StateFilter sets the optional parameter "stateFilter": Filter for job
+// state
+//
+// Possible values:
+// "done" - Finished jobs
+// "pending" - Pending jobs
+// "running" - Running jobs
+func (c *JobsListCall) StateFilter(stateFilter string) *JobsListCall {
+ c.opt_["stateFilter"] = stateFilter
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *JobsListCall) Fields(s ...googleapi.Field) *JobsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *JobsListCall) Do() (*JobList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["allUsers"]; ok {
+ params.Set("allUsers", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["projection"]; ok {
+ params.Set("projection", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["stateFilter"]; ok {
+ params.Set("stateFilter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *JobList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.jobs.list",
+ // "parameterOrder": [
+ // "projectId"
+ // ],
+ // "parameters": {
+ // "allUsers": {
+ // "description": "Whether to display jobs owned by all users in the project. Default false",
+ // "location": "query",
+ // "type": "boolean"
+ // },
+ // "maxResults": {
+ // "description": "Maximum number of results to return",
+ // "format": "uint32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Page token, returned by a previous call, to request the next page of results",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the jobs to list",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Restrict information returned to a set of selected fields",
+ // "enum": [
+ // "full",
+ // "minimal"
+ // ],
+ // "enumDescriptions": [
+ // "Includes all job data",
+ // "Does not include the job configuration"
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "stateFilter": {
+ // "description": "Filter for job state",
+ // "enum": [
+ // "done",
+ // "pending",
+ // "running"
+ // ],
+ // "enumDescriptions": [
+ // "Finished jobs",
+ // "Pending jobs",
+ // "Running jobs"
+ // ],
+ // "location": "query",
+ // "repeated": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/jobs",
+ // "response": {
+ // "$ref": "JobList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
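+
+// Example (editor's sketch): the Projection and StateFilter setters
+// take the enum strings documented above; this illustrative helper
+// lists only currently running jobs with the minimal payload.
+func listRunningJobs(svc *Service, projectID string) (*JobList, error) {
+ return svc.Jobs.List(projectID).StateFilter("running").Projection("minimal").Do()
+}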
+
+// method id "bigquery.jobs.query":
+
+type JobsQueryCall struct {
+ s *Service
+ projectId string
+ queryrequest *QueryRequest
+ opt_ map[string]interface{}
+}
+
+// Query: Runs a BigQuery SQL query synchronously and returns query
+// results if the query completes within a specified timeout.
+func (r *JobsService) Query(projectId string, queryrequest *QueryRequest) *JobsQueryCall {
+ c := &JobsQueryCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.queryrequest = queryrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *JobsQueryCall) Fields(s ...googleapi.Field) *JobsQueryCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *JobsQueryCall) Do() (*QueryResponse, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.queryrequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *QueryResponse
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.",
+ // "httpMethod": "POST",
+ // "id": "bigquery.jobs.query",
+ // "parameterOrder": [
+ // "projectId"
+ // ],
+ // "parameters": {
+ // "projectId": {
+ // "description": "Project ID of the project billed for the query",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/queries",
+ // "request": {
+ // "$ref": "QueryRequest"
+ // },
+ // "response": {
+ // "$ref": "QueryResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
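+
+// Example (editor's sketch): a synchronous query. If the job does not
+// finish inside the request's TimeoutMs, the response reports
+// JobComplete == false and its JobReference is what a caller hands to
+// GetQueryResults. The helper name and timeout value are illustrative.
+func runQuery(svc *Service, projectID, sql string) (*QueryResponse, error) {
+ return svc.Jobs.Query(projectID, &QueryRequest{Query: sql, TimeoutMs: 10000}).Do()
+}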
+
+// method id "bigquery.projects.list":
+
+type ProjectsListCall struct {
+ s *Service
+ opt_ map[string]interface{}
+}
+
+// List: Lists all projects to which you have been granted any project
+// role.
+func (r *ProjectsService) List() *ProjectsListCall {
+ c := &ProjectsListCall{s: r.s, opt_: make(map[string]interface{})}
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return
+func (c *ProjectsListCall) MaxResults(maxResults int64) *ProjectsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, to request the next page of results
+func (c *ProjectsListCall) PageToken(pageToken string) *ProjectsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsListCall) Fields(s ...googleapi.Field) *ProjectsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ProjectsListCall) Do() (*ProjectList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.SetOpaque(req.URL)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ProjectList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists all projects to which you have been granted any project role.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.projects.list",
+ // "parameters": {
+ // "maxResults": {
+ // "description": "Maximum number of results to return",
+ // "format": "uint32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Page token, returned by a previous call, to request the next page of results",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects",
+ // "response": {
+ // "$ref": "ProjectList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "bigquery.tabledata.insertAll":
+
+type TabledataInsertAllCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ tableId string
+ tabledatainsertallrequest *TableDataInsertAllRequest
+ opt_ map[string]interface{}
+}
+
+// InsertAll: Streams data into BigQuery one record at a time without
+// needing to run a load job. Requires the WRITER dataset role.
+func (r *TabledataService) InsertAll(projectId string, datasetId string, tableId string, tabledatainsertallrequest *TableDataInsertAllRequest) *TabledataInsertAllCall {
+ c := &TabledataInsertAllCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.tableId = tableId
+ c.tabledatainsertallrequest = tabledatainsertallrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TabledataInsertAllCall) Fields(s ...googleapi.Field) *TabledataInsertAllCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TabledataInsertAllCall) Do() (*TableDataInsertAllResponse, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.tabledatainsertallrequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ "tableId": c.tableId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TableDataInsertAllResponse
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.",
+ // "httpMethod": "POST",
+ // "id": "bigquery.tabledata.insertAll",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId",
+ // "tableId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the destination table.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the destination table.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "tableId": {
+ // "description": "Table ID of the destination table.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll",
+ // "request": {
+ // "$ref": "TableDataInsertAllRequest"
+ // },
+ // "response": {
+ // "$ref": "TableDataInsertAllResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/bigquery.insertdata",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
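+
+// Example (editor's sketch): issuing a streaming insert and surfacing
+// per-row failures, which arrive in the response body rather than as
+// an HTTP error. Building the request's Rows payload is left to the
+// caller; the helper name and the InsertErrors length check reflect
+// the response schema referenced above.
+func streamRows(svc *Service, projectID, datasetID, tableID string, req *TableDataInsertAllRequest) error {
+ res, err := svc.Tabledata.InsertAll(projectID, datasetID, tableID, req).Do()
+ if err != nil {
+  return err
+ }
+ if len(res.InsertErrors) > 0 {
+  return fmt.Errorf("bigquery: %d row(s) failed to insert", len(res.InsertErrors))
+ }
+ return nil
+}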
+
+// method id "bigquery.tabledata.list":
+
+type TabledataListCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ tableId string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves table data from a specified set of rows. Requires the
+// READER dataset role.
+func (r *TabledataService) List(projectId string, datasetId string, tableId string) *TabledataListCall {
+ c := &TabledataListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.tableId = tableId
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return
+func (c *TabledataListCall) MaxResults(maxResults int64) *TabledataListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, identifying the result set
+func (c *TabledataListCall) PageToken(pageToken string) *TabledataListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// StartIndex sets the optional parameter "startIndex": Zero-based index
+// of the starting row to read
+func (c *TabledataListCall) StartIndex(startIndex uint64) *TabledataListCall {
+ c.opt_["startIndex"] = startIndex
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TabledataListCall) Fields(s ...googleapi.Field) *TabledataListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TabledataListCall) Do() (*TableDataList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["startIndex"]; ok {
+ params.Set("startIndex", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ "tableId": c.tableId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TableDataList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves table data from a specified set of rows. Requires the READER dataset role.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.tabledata.list",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId",
+ // "tableId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the table to read",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "description": "Maximum number of results to return",
+ // "format": "uint32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Page token, returned by a previous call, identifying the result set",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the table to read",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "startIndex": {
+ // "description": "Zero-based index of the starting row to read",
+ // "format": "uint64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "tableId": {
+ // "description": "Table ID of the table to read",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data",
+ // "response": {
+ // "$ref": "TableDataList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
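+
+// Example (editor's sketch): reading a window of rows by absolute
+// position instead of by page token; StartIndex is zero-based per the
+// parameter documentation above, and the helper name is illustrative.
+func readRows(svc *Service, projectID, datasetID, tableID string, offset uint64, count int64) (*TableDataList, error) {
+ return svc.Tabledata.List(projectID, datasetID, tableID).StartIndex(offset).MaxResults(count).Do()
+}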
+
+// method id "bigquery.tables.delete":
+
+type TablesDeleteCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ tableId string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the table specified by tableId from the dataset. If
+// the table contains data, all the data will be deleted.
+func (r *TablesService) Delete(projectId string, datasetId string, tableId string) *TablesDeleteCall {
+ c := &TablesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.tableId = tableId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TablesDeleteCall) Fields(s ...googleapi.Field) *TablesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TablesDeleteCall) Do() error {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ "tableId": c.tableId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.",
+ // "httpMethod": "DELETE",
+ // "id": "bigquery.tables.delete",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId",
+ // "tableId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the table to delete",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the table to delete",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "tableId": {
+ // "description": "Table ID of the table to delete",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "bigquery.tables.get":
+
+type TablesGetCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ tableId string
+ opt_ map[string]interface{}
+}
+
+// Get: Gets the specified table resource by table ID. This method does
+// not return the data in the table; it only returns the table resource,
+// which describes the structure of this table.
+func (r *TablesService) Get(projectId string, datasetId string, tableId string) *TablesGetCall {
+ c := &TablesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.tableId = tableId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TablesGetCall) Fields(s ...googleapi.Field) *TablesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TablesGetCall) Do() (*Table, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ "tableId": c.tableId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Table
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.tables.get",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId",
+ // "tableId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the requested table",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the requested table",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "tableId": {
+ // "description": "Table ID of the requested table",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ // "response": {
+ // "$ref": "Table"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "bigquery.tables.insert":
+
+type TablesInsertCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ table *Table
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a new, empty table in the dataset.
+func (r *TablesService) Insert(projectId string, datasetId string, table *Table) *TablesInsertCall {
+ c := &TablesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.table = table
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TablesInsertCall) Fields(s ...googleapi.Field) *TablesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TablesInsertCall) Do() (*Table, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.table)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Table
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a new, empty table in the dataset.",
+ // "httpMethod": "POST",
+ // "id": "bigquery.tables.insert",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the new table",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the new table",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables",
+ // "request": {
+ // "$ref": "Table"
+ // },
+ // "response": {
+ // "$ref": "Table"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
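+
+// Example (editor's sketch): creating an empty table with a one-column
+// schema. The TableReference/TableSchema shapes follow the request
+// schema referenced above; the column name and helper name are
+// illustrative assumptions.
+func createStringTable(svc *Service, projectID, datasetID, tableID string) (*Table, error) {
+ t := &Table{
+  TableReference: &TableReference{ProjectId: projectID, DatasetId: datasetID, TableId: tableID},
+  Schema: &TableSchema{Fields: []*TableFieldSchema{{Name: "name", Type: "STRING"}}},
+ }
+ return svc.Tables.Insert(projectID, datasetID, t).Do()
+}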
+
+// method id "bigquery.tables.list":
+
+type TablesListCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ opt_ map[string]interface{}
+}
+
+// List: Lists all tables in the specified dataset. Requires the READER
+// dataset role.
+func (r *TablesService) List(projectId string, datasetId string) *TablesListCall {
+ c := &TablesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of results to return
+func (c *TablesListCall) MaxResults(maxResults int64) *TablesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Page token,
+// returned by a previous call, to request the next page of results
+func (c *TablesListCall) PageToken(pageToken string) *TablesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TablesListCall) Fields(s ...googleapi.Field) *TablesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TablesListCall) Do() (*TableList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TableList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists all tables in the specified dataset. Requires the READER dataset role.",
+ // "httpMethod": "GET",
+ // "id": "bigquery.tables.list",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the tables to list",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "description": "Maximum number of results to return",
+ // "format": "uint32",
+ // "location": "query",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Page token, returned by a previous call, to request the next page of results",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the tables to list",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables",
+ // "response": {
+ // "$ref": "TableList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "bigquery.tables.patch":
+
+type TablesPatchCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ tableId string
+ table *Table
+ opt_ map[string]interface{}
+}
+
+// Patch: Updates information in an existing table. The update method
+// replaces the entire table resource, whereas the patch method only
+// replaces fields that are provided in the submitted table resource.
+// This method supports patch semantics.
+func (r *TablesService) Patch(projectId string, datasetId string, tableId string, table *Table) *TablesPatchCall {
+ c := &TablesPatchCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.tableId = tableId
+ c.table = table
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TablesPatchCall) Fields(s ...googleapi.Field) *TablesPatchCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TablesPatchCall) Do() (*Table, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.table)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ "tableId": c.tableId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Table
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "bigquery.tables.patch",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId",
+ // "tableId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the table to update",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the table to update",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "tableId": {
+ // "description": "Table ID of the table to update",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ // "request": {
+ // "$ref": "Table"
+ // },
+ // "response": {
+ // "$ref": "Table"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "bigquery.tables.update":
+
+type TablesUpdateCall struct {
+ s *Service
+ projectId string
+ datasetId string
+ tableId string
+ table *Table
+ opt_ map[string]interface{}
+}
+
+// Update: Updates information in an existing table. The update method
+// replaces the entire table resource, whereas the patch method only
+// replaces fields that are provided in the submitted table resource.
+func (r *TablesService) Update(projectId string, datasetId string, tableId string, table *Table) *TablesUpdateCall {
+ c := &TablesUpdateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.datasetId = datasetId
+ c.tableId = tableId
+ c.table = table
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TablesUpdateCall) Fields(s ...googleapi.Field) *TablesUpdateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *TablesUpdateCall) Do() (*Table, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.table)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "datasetId": c.datasetId,
+ "tableId": c.tableId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", c.s.userAgent())
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Table
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.",
+ // "httpMethod": "PUT",
+ // "id": "bigquery.tables.update",
+ // "parameterOrder": [
+ // "projectId",
+ // "datasetId",
+ // "tableId"
+ // ],
+ // "parameters": {
+ // "datasetId": {
+ // "description": "Dataset ID of the table to update",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "Project ID of the table to update",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "tableId": {
+ // "description": "Table ID of the table to update",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
+ // "request": {
+ // "$ref": "Table"
+ // },
+ // "response": {
+ // "$ref": "Table"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/bigquery",
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go
new file mode 100644
index 0000000..3219a5f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go
@@ -0,0 +1,551 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package googleapi contains the common code shared by all Google API
+// libraries.
+package googleapi
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/api/googleapi/internal/uritemplates"
+)
+
+// ContentTyper is an interface for Readers which know (or would like
+// to override) their Content-Type. If a media body doesn't implement
+// ContentTyper, the type is sniffed from the content using
+// http.DetectContentType.
+type ContentTyper interface {
+ ContentType() string
+}
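+
+// Editorial sketch (not part of the original file): a media reader can pin
+// its Content-Type by implementing ContentTyper, which bypasses sniffing.
+// The jsonFile wrapper below is hypothetical:
+//
+//     type jsonFile struct{ *os.File }
+//
+//     func (jsonFile) ContentType() string { return "application/json" }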
+
+// A SizeReaderAt is a ReaderAt with a Size method.
+// An io.SectionReader implements SizeReaderAt.
+type SizeReaderAt interface {
+ io.ReaderAt
+ Size() int64
+}
+
+const (
+ Version = "0.5"
+
+ // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
+ statusResumeIncomplete = 308
+
+ // UserAgent is the header string used to identify this package.
+ UserAgent = "google-api-go-client/" + Version
+
+ // uploadPause determines the delay between failed upload attempts
+ uploadPause = 1 * time.Second
+)
+
+// Error contains an error response from the server.
+type Error struct {
+ // Code is the HTTP response status code and will always be populated.
+ Code int `json:"code"`
+ // Message is the server response message and is only populated when
+ // explicitly referenced by the JSON server response.
+ Message string `json:"message"`
+ // Body is the raw response returned by the server.
+ // It is often but not always JSON, depending on how the request fails.
+ Body string
+
+ Errors []ErrorItem
+}
+
+// ErrorItem is a detailed error code & message from the Google API frontend.
+type ErrorItem struct {
+ // Reason is the typed error code. For example: "some_example".
+ Reason string `json:"reason"`
+ // Message is the human-readable description of the error.
+ Message string `json:"message"`
+}
+
+func (e *Error) Error() string {
+ if len(e.Errors) == 0 && e.Message == "" {
+ return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
+ }
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
+ if e.Message != "" {
+ fmt.Fprintf(&buf, "%s", e.Message)
+ }
+ if len(e.Errors) == 0 {
+ return strings.TrimSpace(buf.String())
+ }
+ if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
+ fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
+ return buf.String()
+ }
+ fmt.Fprintln(&buf, "\nMore details:")
+ for _, v := range e.Errors {
+ fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
+ }
+ return buf.String()
+}
+
+type errorReply struct {
+ Error *Error `json:"error"`
+}
+
+// CheckResponse returns an error (of type *Error) if the response
+// status code is not 2xx.
+func CheckResponse(res *http.Response) error {
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ return nil
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err == nil {
+ jerr := new(errorReply)
+ err = json.Unmarshal(slurp, jerr)
+ if err == nil && jerr.Error != nil {
+ if jerr.Error.Code == 0 {
+ jerr.Error.Code = res.StatusCode
+ }
+ jerr.Error.Body = string(slurp)
+ return jerr.Error
+ }
+ }
+ return &Error{
+ Code: res.StatusCode,
+ Body: string(slurp),
+ }
+}
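+
+// Usage sketch (editorial): the generated calls in this vendored tree pair
+// CheckResponse with CloseBody, roughly as follows; client and req stand in
+// for the caller's values:
+//
+//     res, err := client.Do(req)
+//     if err != nil {
+//         return nil, err
+//     }
+//     defer CloseBody(res)
+//     if err := CheckResponse(res); err != nil {
+//         return nil, err // err is *Error for non-2xx responses
+//     }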
+
+type MarshalStyle bool
+
+var WithDataWrapper = MarshalStyle(true)
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+ buf := new(bytes.Buffer)
+ if wrap {
+ buf.Write([]byte(`{"data": `))
+ }
+ err := json.NewEncoder(buf).Encode(v)
+ if err != nil {
+ return nil, err
+ }
+ if wrap {
+ buf.Write([]byte(`}`))
+ }
+ return buf, nil
+}
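+
+// Behavior sketch (editorial): WithDataWrapper wraps the payload in a
+// {"data": ...} envelope, while WithoutDataWrapper emits it bare. For
+// v := map[string]int{"n": 1}:
+//
+//     WithoutDataWrapper.JSONReader(v) // reads as {"n":1}
+//     WithDataWrapper.JSONReader(v)    // reads as {"data": {"n":1}}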
+
+func getMediaType(media io.Reader) (io.Reader, string) {
+ if typer, ok := media.(ContentTyper); ok {
+ return media, typer.ContentType()
+ }
+
+ pr, pw := io.Pipe()
+ typ := "application/octet-stream"
+ buf, err := ioutil.ReadAll(io.LimitReader(media, 512))
+ if err != nil {
+ pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
+ return pr, typ
+ }
+ typ = http.DetectContentType(buf)
+ mr := io.MultiReader(bytes.NewReader(buf), media)
+ go func() {
+ _, err = io.Copy(pw, mr)
+ if err != nil {
+ pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
+ return
+ }
+ pw.Close()
+ }()
+ return pr, typ
+}
+
+// DetectMediaType detects and returns the content type of the provided media.
+// If the type cannot be determined, "application/octet-stream" is returned.
+func DetectMediaType(media io.ReaderAt) string {
+ if typer, ok := media.(ContentTyper); ok {
+ return typer.ContentType()
+ }
+
+ typ := "application/octet-stream"
+ buf := make([]byte, 1024)
+ n, err := media.ReadAt(buf, 0)
+ buf = buf[:n]
+ if err == nil || err == io.EOF {
+ typ = http.DetectContentType(buf)
+ }
+ return typ
+}
+
+type Lengther interface {
+ Len() int
+}
+
+// endingWithErrorReader reads from r until it returns an error. If the
+// final error from r is io.EOF and e is non-nil, e is used instead.
+type endingWithErrorReader struct {
+ r io.Reader
+ e error
+}
+
+func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
+ n, err = er.r.Read(p)
+ if err == io.EOF && er.e != nil {
+ err = er.e
+ }
+ return
+}
+
+func typeHeader(contentType string) textproto.MIMEHeader {
+ h := make(textproto.MIMEHeader)
+ h.Set("Content-Type", contentType)
+ return h
+}
+
+// countingWriter counts the number of bytes it receives to write, but
+// discards them.
+type countingWriter struct {
+ n *int64
+}
+
+func (w countingWriter) Write(p []byte) (int, error) {
+ *w.n += int64(len(p))
+ return len(p), nil
+}
+
+// ConditionallyIncludeMedia does nothing if media is nil.
+//
+// bodyp is an in/out parameter. It should initially point to the
+// reader of the application/json (or whatever) payload to send in the
+// API request. It's updated to point to the multipart body reader.
+//
+// ctypep is an in/out parameter. It should initially point to the
+// content type of the bodyp, usually "application/json". It's updated
+// to the "multipart/related" content type, with random boundary.
+//
+// The return values are a cancel function, which aborts the multipart body
+// by closing its pipe with an error, and ok, which reports whether media was
+// non-nil.
+func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) {
+ if media == nil {
+ return
+ }
+ // Get the media type, which might return a different reader instance.
+ var mediaType string
+ media, mediaType = getMediaType(media)
+
+ body, bodyType := *bodyp, *ctypep
+
+ pr, pw := io.Pipe()
+ mpw := multipart.NewWriter(pw)
+ *bodyp = pr
+ *ctypep = "multipart/related; boundary=" + mpw.Boundary()
+ go func() {
+ w, err := mpw.CreatePart(typeHeader(bodyType))
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err))
+ return
+ }
+ _, err = io.Copy(w, body)
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err))
+ return
+ }
+
+ w, err = mpw.CreatePart(typeHeader(mediaType))
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err))
+ return
+ }
+ _, err = io.Copy(w, media)
+ if err != nil {
+ mpw.Close()
+ pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err))
+ return
+ }
+ mpw.Close()
+ pw.Close()
+ }()
+ cancel = func() { pw.CloseWithError(errAborted) }
+ return cancel, true
+}
+
+var errAborted = errors.New("googleapi: upload aborted")
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+// ResumableUpload is used by the generated APIs to provide resumable uploads.
+// It is not used by developers directly.
+type ResumableUpload struct {
+ Client *http.Client
+ // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
+ URI string
+ UserAgent string // User-Agent for header of the request
+ // Media is the object being uploaded.
+ Media io.ReaderAt
+ // MediaType defines the media type, e.g. "image/jpeg".
+ MediaType string
+ // ContentLength is the full size of the object being uploaded.
+ ContentLength int64
+
+ mu sync.Mutex // guards progress
+ progress int64 // number of bytes uploaded so far
+ started bool // whether the upload has been started
+
+ // Callback is an optional function that will be called upon every progress update.
+ Callback ProgressUpdater
+}
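+
+// Usage sketch (editorial): generated media-upload calls expose the Callback
+// through a ProgressUpdater option; the exact method name varies by API and
+// is assumed here:
+//
+//     call.ProgressUpdater(func(current, total int64) {
+//         log.Printf("uploaded %d/%d bytes", current, total)
+//     })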
+
+var (
+ // rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded.
+ rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`)
+ // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
+ // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
+ chunkSize int64 = 1 << 18
+)
+
+// Progress returns the number of bytes uploaded at this point.
+func (rx *ResumableUpload) Progress() int64 {
+ rx.mu.Lock()
+ defer rx.mu.Unlock()
+ return rx.progress
+}
+
+func (rx *ResumableUpload) transferStatus() (int64, *http.Response, error) {
+ req, _ := http.NewRequest("POST", rx.URI, nil)
+ req.ContentLength = 0
+ req.Header.Set("User-Agent", rx.UserAgent)
+ req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
+ res, err := rx.Client.Do(req)
+ if err != nil || res.StatusCode != statusResumeIncomplete {
+ return 0, res, err
+ }
+ var start int64
+ if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 {
+ start, err = strconv.ParseInt(m[1], 10, 64)
+ if err != nil {
+ return 0, nil, fmt.Errorf("unable to parse range size %v", m[1])
+ }
+ start++ // Start at the next byte
+ }
+ return start, res, nil
+}
+
+type chunk struct {
+ body io.Reader
+ size int64
+ err error
+}
+
+func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {
+ var start int64
+ var err error
+ res := &http.Response{}
+ if rx.started {
+ start, res, err = rx.transferStatus()
+ if err != nil || res.StatusCode != statusResumeIncomplete {
+ return res, err
+ }
+ }
+ rx.started = true
+
+ for {
+ select { // Check for cancellation
+ case <-ctx.Done():
+ res.StatusCode = http.StatusRequestTimeout
+ return res, ctx.Err()
+ default:
+ }
+ reqSize := rx.ContentLength - start
+ if reqSize > chunkSize {
+ reqSize = chunkSize
+ }
+ r := io.NewSectionReader(rx.Media, start, reqSize)
+ req, _ := http.NewRequest("POST", rx.URI, r)
+ req.ContentLength = reqSize
+ req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
+ req.Header.Set("Content-Type", rx.MediaType)
+ req.Header.Set("User-Agent", rx.UserAgent)
+ res, err = rx.Client.Do(req)
+ start += reqSize
+ if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) {
+ rx.mu.Lock()
+ rx.progress = start // keep track of number of bytes sent so far
+ rx.mu.Unlock()
+ if rx.Callback != nil {
+ rx.Callback(start, rx.ContentLength)
+ }
+ }
+ if err != nil || res.StatusCode != statusResumeIncomplete {
+ break
+ }
+ }
+ return res, err
+}
+
+var sleep = time.Sleep // override in unit tests
+
+// Upload starts the process of a resumable upload with a cancellable context.
+// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.
+// It is called from the auto-generated API code and is not visible to the user.
+// rx is private to the auto-generated API code.
+func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
+ var res *http.Response
+ var err error
+ for {
+ res, err = rx.transferChunks(ctx)
+ if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
+ return res, err
+ }
+ select { // Check for cancellation
+ case <-ctx.Done():
+ res.StatusCode = http.StatusRequestTimeout
+ return res, ctx.Err()
+ default:
+ }
+ sleep(uploadPause)
+ }
+}
+
+func ResolveRelative(basestr, relstr string) string {
+ u, _ := url.Parse(basestr)
+ rel, _ := url.Parse(relstr)
+ u = u.ResolveReference(rel)
+ us := u.String()
+ us = strings.Replace(us, "%7B", "{", -1)
+ us = strings.Replace(us, "%7D", "}", -1)
+ return us
+}
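+
+// Behavior sketch (editorial): ResolveRelative joins a base URL with a path
+// template while keeping {braces} literal for a later Expand, e.g.:
+//
+//     ResolveRelative("https://www.googleapis.com/bigquery/v2/", "projects/{projectId}/datasets")
+//     // => "https://www.googleapis.com/bigquery/v2/projects/{projectId}/datasets"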
+
+// has4860Fix is whether this Go environment contains the fix for
+// http://golang.org/issue/4860
+var has4860Fix bool
+
+// init initializes has4860Fix by checking the behavior of the net/http package.
+func init() {
+ r := http.Request{
+ URL: &url.URL{
+ Scheme: "http",
+ Opaque: "//opaque",
+ },
+ }
+ b := &bytes.Buffer{}
+ r.Write(b)
+ has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
+}
+
+// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
+// don't alter any hex-escaped characters in u.Path.
+func SetOpaque(u *url.URL) {
+ u.Opaque = "//" + u.Host + u.Path
+ if !has4860Fix {
+ u.Opaque = u.Scheme + ":" + u.Opaque
+ }
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// This calls SetOpaque to avoid encoding of the parameters in the URL path.
+func Expand(u *url.URL, expansions map[string]string) {
+ expanded, err := uritemplates.Expand(u.Path, expansions)
+ if err == nil {
+ u.Path = expanded
+ SetOpaque(u)
+ }
+}
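+
+// Usage sketch (editorial; example.com is a placeholder host):
+//
+//     req, _ := http.NewRequest("GET", "https://example.com/projects/{projectId}", nil)
+//     Expand(req.URL, map[string]string{"projectId": "my-project"})
+//     // req.URL now addresses /projects/my-project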
+
+// CloseBody is used to close res.Body.
+// Prior to calling Close, it also tries to Read a small amount to see an EOF.
+// Not seeing an EOF can prevent HTTP Transports from reusing connections.
+func CloseBody(res *http.Response) {
+ if res == nil || res.Body == nil {
+ return
+ }
+ // Justification for 3 byte reads: two for up to "\r\n" after
+ // a JSON/XML document, and then 1 to see EOF if we haven't yet.
+ // TODO(bradfitz): detect Go 1.3+ and skip these reads.
+ // See https://codereview.appspot.com/58240043
+ // and https://codereview.appspot.com/49570044
+ buf := make([]byte, 1)
+ for i := 0; i < 3; i++ {
+ _, err := res.Body.Read(buf)
+ if err != nil {
+ break
+ }
+ }
+ res.Body.Close()
+}
+
+// VariantType returns the type name of the given variant.
+// If the map doesn't contain a "type" key, or its value is not a string, "" is returned.
+// This is used to support "variant" APIs that can return one of a number of different types.
+func VariantType(t map[string]interface{}) string {
+ s, _ := t["type"].(string)
+ return s
+}
+
+// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
+// This is used to support "variant" APIs that can return one of a number of different types.
+// It reports whether the conversion was successful.
+func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
+ var buf bytes.Buffer
+ err := json.NewEncoder(&buf).Encode(v)
+ if err != nil {
+ return false
+ }
+ return json.Unmarshal(buf.Bytes(), dst) == nil
+}
+
+// A Field names a field to be retrieved with a partial response.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+//
+// Partial responses can dramatically reduce the amount of data that must be sent to your application.
+// In order to request partial responses, you can specify the full list of fields
+// that your application needs by adding the Fields option to your request.
+//
+// Field strings use camelCase with leading lower-case characters to identify fields within the response.
+//
+// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
+// you could request just those fields like this:
+//
+// svc.Events.List().Fields("nextPageToken", "items/id").Do()
+//
+// or if you were also interested in each Item's "Updated" field, you can combine them like this:
+//
+// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
+//
+// More information about field formatting can be found here:
+// https://developers.google.com/+/api/#fields-syntax
+//
+// Another way to find field names is through the Google API explorer:
+// https://developers.google.com/apis-explorer/#p/
+type Field string
+
+// CombineFields combines fields into a single string.
+func CombineFields(s []Field) string {
+ r := make([]string, len(s))
+ for i, v := range s {
+ r[i] = string(v)
+ }
+ return strings.Join(r, ",")
+}
diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
new file mode 100644
index 0000000..de9c88c
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013 Joshua Tacoma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
new file mode 100644
index 0000000..8a84813
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
@@ -0,0 +1,359 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 4 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+// values := make(map[string]interface{})
+// values["user"] = "jtacoma"
+// values["repo"] = "uritemplates"
+// expanded, _ := template.ExpandString(values)
+// fmt.Println(expanded)
+//
+package uritemplates
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+ reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+ validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+ hex = []byte("0123456789ABCDEF")
+)
+
+func pctEncode(src []byte) []byte {
+ dst := make([]byte, len(src)*3)
+ for i, b := range src {
+ buf := dst[i*3 : i*3+3]
+ buf[0] = 0x25
+ buf[1] = hex[b/16]
+ buf[2] = hex[b%16]
+ }
+ return dst
+}
+
+func escape(s string, allowReserved bool) (escaped string) {
+ if allowReserved {
+ escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+ } else {
+ escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+ }
+ return escaped
+}
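+
+// Behavior sketch (editorial): allowReserved controls whether RFC 3986
+// reserved characters survive percent-encoding:
+//
+//     escape("a/b c", false) // "a%2Fb%20c"
+//     escape("a/b c", true)  // "a/b%20c"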
+
+// A UriTemplate is a parsed representation of a URI template.
+type UriTemplate struct {
+ raw string
+ parts []templatePart
+}
+
+// Parse parses a URI template string into a UriTemplate object.
+func Parse(rawtemplate string) (template *UriTemplate, err error) {
+ template = new(UriTemplate)
+ template.raw = rawtemplate
+ split := strings.Split(rawtemplate, "{")
+ template.parts = make([]templatePart, len(split)*2-1)
+ for i, s := range split {
+ if i == 0 {
+ if strings.Contains(s, "}") {
+ err = errors.New("unexpected }")
+ break
+ }
+ template.parts[i].raw = s
+ } else {
+ subsplit := strings.Split(s, "}")
+ if len(subsplit) != 2 {
+ err = errors.New("malformed template")
+ break
+ }
+ expression := subsplit[0]
+ template.parts[i*2-1], err = parseExpression(expression)
+ if err != nil {
+ break
+ }
+ template.parts[i*2].raw = subsplit[1]
+ }
+ }
+ if err != nil {
+ template = nil
+ }
+ return template, err
+}
+
+type templatePart struct {
+ raw string
+ terms []templateTerm
+ first string
+ sep string
+ named bool
+ ifemp string
+ allowReserved bool
+}
+
+type templateTerm struct {
+ name string
+ explode bool
+ truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+ switch expression[0] {
+ case '+':
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ case '.':
+ result.first = "."
+ result.sep = "."
+ expression = expression[1:]
+ case '/':
+ result.first = "/"
+ result.sep = "/"
+ expression = expression[1:]
+ case ';':
+ result.first = ";"
+ result.sep = ";"
+ result.named = true
+ expression = expression[1:]
+ case '?':
+ result.first = "?"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
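+
+// Editorial note (not in the original source): the operator prefixes above
+// select the RFC 6570 expansion styles. With user="fred":
+//
+//     {user}  -> "fred"         {+user} -> "fred" (reserved chars kept)
+//     {.user} -> ".fred"        {/user} -> "/fred"
+//     {;user} -> ";user=fred"   {?user} -> "?user=fred"
+//     {&user} -> "&user=fred"   {#user} -> "#fred"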
+
+func parseTerm(term string) (result templateTerm, err error) {
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
+func (self *UriTemplate) Expand(value interface{}) (string, error) {
+ values, ismap := value.(map[string]interface{})
+ if !ismap {
+ if m, ismap := struct2map(value); !ismap {
+ return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
+ } else {
+ return self.Expand(m)
+ }
+ }
+ var buf bytes.Buffer
+ for _, p := range self.parts {
+ err := p.expand(&buf, values)
+ if err != nil {
+ return "", err
+ }
+ }
+ return buf.String(), nil
+}
+
+func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
+ if len(self.raw) > 0 {
+ buf.WriteString(self.raw)
+ return nil
+ }
+ var zeroLen = buf.Len()
+ buf.WriteString(self.first)
+ var firstLen = buf.Len()
+ for _, term := range self.terms {
+ value, exists := values[term.name]
+ if !exists {
+ continue
+ }
+ if buf.Len() != firstLen {
+ buf.WriteString(self.sep)
+ }
+ switch v := value.(type) {
+ case string:
+ self.expandString(buf, term, v)
+ case []interface{}:
+ self.expandArray(buf, term, v)
+ case map[string]interface{}:
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, v)
+ default:
+ if m, ismap := struct2map(value); ismap {
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, m)
+ } else {
+ str := fmt.Sprintf("%v", value)
+ self.expandString(buf, term, str)
+ }
+ }
+ }
+ if buf.Len() == firstLen {
+ original := buf.Bytes()[:zeroLen]
+ buf.Reset()
+ buf.Write(original)
+ }
+ return nil
+}
+
+func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
+ if self.named {
+ buf.WriteString(name)
+ if empty {
+ buf.WriteString(self.ifemp)
+ } else {
+ buf.WriteString("=")
+ }
+ }
+}
+
+func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ self.expandName(buf, t.name, len(s) == 0)
+ buf.WriteString(escape(s, self.allowReserved))
+}
+
+func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
+ if len(a) == 0 {
+ return
+ } else if !t.explode {
+ self.expandName(buf, t.name, false)
+ }
+ for i, value := range a {
+ if t.explode && i > 0 {
+ buf.WriteString(self.sep)
+ } else if i > 0 {
+ buf.WriteString(",")
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ if self.named && t.explode {
+ self.expandName(buf, t.name, len(s) == 0)
+ }
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+}
+
+func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
+ if len(m) == 0 {
+ return
+ }
+ if !t.explode {
+ self.expandName(buf, t.name, len(m) == 0)
+ }
+ var firstLen = buf.Len()
+ for k, value := range m {
+ if firstLen != buf.Len() {
+ if t.explode {
+ buf.WriteString(self.sep)
+ } else {
+ buf.WriteString(",")
+ }
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if t.explode {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune('=')
+ buf.WriteString(escape(s, self.allowReserved))
+ } else {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune(',')
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+ }
+}
+
+func struct2map(v interface{}) (map[string]interface{}, bool) {
+ value := reflect.ValueOf(v)
+ switch value.Type().Kind() {
+ case reflect.Ptr:
+ return struct2map(value.Elem().Interface())
+ case reflect.Struct:
+ m := make(map[string]interface{})
+ for i := 0; i < value.NumField(); i++ {
+ tag := value.Type().Field(i).Tag
+ var name string
+ if strings.Contains(string(tag), ":") {
+ name = tag.Get("uri")
+ } else {
+ name = strings.TrimSpace(string(tag))
+ }
+ if len(name) == 0 {
+ name = value.Type().Field(i).Name
+ }
+ m[name] = value.Field(i).Interface()
+ }
+ return m, true
+ }
+ return nil, false
+}
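+
+// Behavior sketch (editorial, not part of the original file): struct fields
+// can name their template variable with a `uri` tag; untagged fields expand
+// under the Go field name:
+//
+//     type repo struct {
+//         User string `uri:"user"` // expands under key "user"
+//         Name string              // expands under key "Name"
+//     }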
diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go
new file mode 100644
index 0000000..399ef46
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go
@@ -0,0 +1,13 @@
+package uritemplates
+
+func Expand(path string, expansions map[string]string) (string, error) {
+ template, err := Parse(path)
+ if err != nil {
+ return "", err
+ }
+ values := make(map[string]interface{})
+ for k, v := range expansions {
+ values[k] = v
+ }
+ return template.Expand(values)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go
new file mode 100644
index 0000000..eca1ea2
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/transport/apikey.go
@@ -0,0 +1,38 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package transport contains HTTP transports used to make
+// authenticated API requests.
+package transport
+
+import (
+ "errors"
+ "net/http"
+)
+
+// APIKey is an HTTP Transport which wraps an underlying transport and
+// appends an API Key "key" parameter to the URL of outgoing requests.
+type APIKey struct {
+ // Key is the API Key to set on requests.
+ Key string
+
+ // Transport is the underlying HTTP transport.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+}
+
+func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
+ rt := t.Transport
+ if rt == nil {
+ rt = http.DefaultTransport
+ if rt == nil {
+ return nil, errors.New("googleapi/transport: no Transport specified or available")
+ }
+ }
+ newReq := *req
+ args := newReq.URL.Query()
+ args.Set("key", t.Key)
+ newReq.URL.RawQuery = args.Encode()
+ return rt.RoundTrip(&newReq)
+}
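+
+// Usage sketch (editorial, not part of the original file; the key value is a
+// placeholder):
+//
+//     client := &http.Client{Transport: &APIKey{Key: "YOUR_API_KEY"}}
+//     res, err := client.Get(url) // ?key=YOUR_API_KEY is appended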
diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go
new file mode 100644
index 0000000..7ed7dd9
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go
@@ -0,0 +1,150 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package googleapi
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// Int64s is a slice of int64s that marshal as quoted strings in JSON.
+type Int64s []int64
+
+func (q *Int64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, int64(v))
+ }
+ return nil
+}
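+
+// Behavior sketch (editorial): these slice types round-trip as JSON arrays
+// of quoted strings, matching how Google APIs encode 64-bit values:
+//
+//     var v Int64s
+//     _ = json.Unmarshal([]byte(`["1","2"]`), &v) // v == Int64s{1, 2}
+//     b, _ := v.MarshalJSON()                     // string(b) == `["1","2"]`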
+
+// Int32s is a slice of int32s that marshal as quoted strings in JSON.
+type Int32s []int32
+
+func (q *Int32s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, int32(v))
+ }
+ return nil
+}
+
+// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
+type Uint64s []uint64
+
+func (q *Uint64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, uint64(v))
+ }
+ return nil
+}
+
+// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
+type Uint32s []uint32
+
+func (q *Uint32s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, uint32(v))
+ }
+ return nil
+}
+
+// Float64s is a slice of float64s that marshal as quoted strings in JSON.
+type Float64s []float64
+
+func (q *Float64s) UnmarshalJSON(raw []byte) error {
+ *q = (*q)[:0]
+ var ss []string
+ if err := json.Unmarshal(raw, &ss); err != nil {
+ return err
+ }
+ for _, s := range ss {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ *q = append(*q, float64(v))
+ }
+ return nil
+}
+
+func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
+ dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
+ dst = append(dst, '[')
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ dst = append(dst, ',')
+ }
+ dst = append(dst, '"')
+ dst = fn(dst, i)
+ dst = append(dst, '"')
+ }
+ dst = append(dst, ']')
+ return dst, nil
+}
+
+func (s Int64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendInt(dst, s[i], 10)
+ })
+}
+
+func (s Int32s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendInt(dst, int64(s[i]), 10)
+ })
+}
+
+func (s Uint64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendUint(dst, s[i], 10)
+ })
+}
+
+func (s Uint32s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendUint(dst, uint64(s[i]), 10)
+ })
+}
+
+func (s Float64s) MarshalJSON() ([]byte, error) {
+ return quotedList(len(s), func(dst []byte, i int) []byte {
+ return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
+ })
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/LICENSE b/Godeps/_workspace/src/google.golang.org/cloud/LICENSE
new file mode 100644
index 0000000..a4c5efd
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2014 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
new file mode 100644
index 0000000..6e6fde8
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
@@ -0,0 +1,221 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/cloud/internal"
+)
+
+type cachedValue struct {
+ k string
+ trim bool
+ mu sync.Mutex
+ v string
+}
+
+var (
+ projID = &cachedValue{k: "project/project-id", trim: true}
+ projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+ instID = &cachedValue{k: "instance/id", trim: true}
+)
+
+var metaClient = &http.Client{
+ Transport: &internal.UATransport{
+ Base: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 750 * time.Millisecond,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ ResponseHeaderTimeout: 750 * time.Millisecond,
+ },
+ },
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://metadata/computeMetadata/v1/".
+func Get(suffix string) (string, error) {
+ // Using 169.254.169.254 instead of "metadata" here because Go
+ // binaries built with the "netgo" tag and without cgo won't
+ // know the search suffix for "metadata" is
+ // ".google.internal", and this IP address is documented as
+ // being stable anyway.
+ url := "http://169.254.169.254/computeMetadata/v1/" + suffix
+ req, _ := http.NewRequest("GET", url, nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ res, err := metaClient.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+ }
+ all, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", err
+ }
+ return string(all), nil
+}
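+
+// Usage sketch (editorial): arbitrary documented metadata paths can be
+// fetched directly; the helpers below wrap the common ones:
+//
+//     zone, err := metadata.Get("instance/zone")
+//     if err != nil {
+//         // not on GCE, or the metadata server is unreachable
+//     }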
+
+func getTrimmed(suffix string) (s string, err error) {
+ s, err = Get(suffix)
+ s = strings.TrimSpace(s)
+ return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+ defer c.mu.Unlock()
+ c.mu.Lock()
+ if c.v != "" {
+ return c.v, nil
+ }
+ if c.trim {
+ v, err = getTrimmed(c.k)
+ } else {
+ v, err = Get(c.k)
+ }
+ if err == nil {
+ c.v = v
+ }
+ return
+}
+
+var onGCE struct {
+ sync.Mutex
+ set bool
+ v bool
+}
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+ defer onGCE.Unlock()
+ onGCE.Lock()
+ if onGCE.set {
+ return onGCE.v
+ }
+ onGCE.set = true
+
+ // We use the DNS name of the metadata service here instead of the IP address
+ // because we expect that to fail faster in the not-on-GCE case.
+ res, err := metaClient.Get("http://metadata.google.internal")
+ if err != nil {
+ return false
+ }
+ defer res.Body.Close()
+ onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
+ return onGCE.v
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will probably be of
+// the form "INSTANCENAME.c.PROJECT.internal" but that isn't
+// guaranteed.
+//
+// TODO: what is this defined to be? Docs say "The host name of the
+// instance."
+func Hostname() (string, error) {
+ return getTrimmed("network-interfaces/0/ip")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+ var s []string
+ j, err := Get("instance/tags")
+ if err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+ return instID.get()
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+ j, err := Get(suffix)
+ if err != nil {
+ return nil, err
+ }
+ s := strings.Split(strings.TrimSpace(j), "\n")
+ for i := range s {
+ s[i] = strings.TrimSpace(s[i])
+ }
+ return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+func InstanceAttributeValue(attr string) (string, error) {
+ return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+func ProjectAttributeValue(attr string) (string, error) {
+ return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+ if serviceAccount == "" {
+ serviceAccount = "default"
+ }
+ return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
new file mode 100644
index 0000000..db37727
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
@@ -0,0 +1,62 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+ "fmt"
+ "net/http"
+)
+
+const userAgent = "gcloud-golang/0.1"
+
+// UATransport is an http.RoundTripper that appends
+// Google Cloud client's user-agent to the original
+// request's user-agent header.
+type UATransport struct {
+ // Base represents the actual http.RoundTripper
+ // the requests will be delegated to.
+ Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *UATransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req = cloneRequest(req)
+ ua := req.Header.Get("User-Agent")
+ if ua == "" {
+ ua = userAgent
+ } else {
+ ua = fmt.Sprintf("%s;%s", ua, userAgent)
+ }
+ req.Header.Set("User-Agent", ua)
+ return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return r2
+}
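+
+// Usage sketch (editorial): the metadata client in this tree wraps its base
+// transport the same way a caller would:
+//
+//     client := &http.Client{
+//         Transport: &UATransport{Base: http.DefaultTransport},
+//     }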
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/.gitignore b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/.gitignore
new file mode 100644
index 0000000..792ca00
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/.gitignore
@@ -0,0 +1,29 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.test
+*.out
+*.txt
+cover.html
+README.html
\ No newline at end of file
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/LICENSE b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/LICENSE
new file mode 100644
index 0000000..6a2ae9a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Dean Karn
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/README.md b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/README.md
new file mode 100644
index 0000000..dd2a4f4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/README.md
@@ -0,0 +1,368 @@
+Package validator
+================
+
+[![Join the chat at https://gitter.im/bluesuncorp/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+![Project status](https://img.shields.io/badge/version-8.17.1-green.svg)
+[![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/530054/badge.svg)](https://semaphoreci.com/joeybloggs/validator)
+[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v8&service=github)](https://coveralls.io/github/go-playground/validator?branch=v8)
+[![Go Report Card](http://goreportcard.com/badge/go-playground/validator)](http://goreportcard.com/report/go-playground/validator)
+[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v8?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v8)
+![License](https://img.shields.io/dub/l/vibe-d.svg)
+
+Package validator implements value validations for structs and individual fields based on tags.
+
+It has the following **unique** features:
+
+- Cross Field and Cross Struct validations by using validation tags or custom validators.
+- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
+- Handles type interface{} by determining its underlying type prior to validation.
+- Handles custom field types, such as the sql/driver [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29).
+- Alias validation tags, which map several validations to a single tag, making validations easier to define on structs (see the example after this list).
+- Extraction of a custom-defined field name, e.g. the JSON name can be extracted while validating and made available in the resulting FieldError.
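+
+For example, the baked-in alias `iscolor` maps to `hexcolor|rgb|rgba|hsl|hsla`, so a single tag covers all five checks (a sketch using a hypothetical `Theme` struct):
+
+```go
+type Theme struct {
+	Primary string `validate:"iscolor"` // expands to hexcolor|rgb|rgba|hsl|hsla
+}
+```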
+
+Installation
+------------
+
+Use go get.
+
+ go get gopkg.in/go-playground/validator.v8
+
+or to update
+
+ go get -u gopkg.in/go-playground/validator.v8
+
+Then import the validator package into your own code.
+
+ import "gopkg.in/go-playground/validator.v8"
+
+Error Return Value
+-------
+
+Validation functions return type error.
+
+They return type error to avoid the issue discussed in the following, where err is always != nil:
+
+* http://stackoverflow.com/a/29138676/3158232
+* https://github.com/go-playground/validator/issues/134
+
+validator only returns nil or ValidationErrors as type error; so in your code all you need to do
+is check if the returned error is not nil, and if it's not, type-assert it to type ValidationErrors
+like so:
+
+```go
+err := validate.Struct(mystruct)
+if err != nil {
+	validationErrors := err.(validator.ValidationErrors)
+	fmt.Println(validationErrors)
+}
+```
+
+Usage and documentation
+------
+
+Please see http://godoc.org/gopkg.in/go-playground/validator.v8 for detailed usage docs.
+
+##### Examples:
+
+Struct & Field validation
+```go
+package main
+
+import (
+ "fmt"
+
+ "gopkg.in/go-playground/validator.v8"
+)
+
+// User contains user information
+type User struct {
+ FirstName string `validate:"required"`
+ LastName string `validate:"required"`
+ Age uint8 `validate:"gte=0,lte=130"`
+ Email string `validate:"required,email"`
+ FavouriteColor string `validate:"hexcolor|rgb|rgba"`
+ Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
+}
+
+// Address houses a user's address information
+type Address struct {
+ Street string `validate:"required"`
+ City string `validate:"required"`
+ Planet string `validate:"required"`
+ Phone string `validate:"required"`
+}
+
+var validate *validator.Validate
+
+func main() {
+
+ config := &validator.Config{TagName: "validate"}
+
+ validate = validator.New(config)
+
+ validateStruct()
+ validateField()
+}
+
+func validateStruct() {
+
+ address := &Address{
+ Street: "Eavesdown Docks",
+	Planet: "Persephone",
+ Phone: "none",
+ }
+
+ user := &User{
+ FirstName: "Badger",
+ LastName: "Smith",
+ Age: 135,
+ Email: "Badger.Smith@gmail.com",
+ FavouriteColor: "#000",
+ Addresses: []*Address{address},
+ }
+
+ // returns nil or ValidationErrors ( map[string]*FieldError )
+ errs := validate.Struct(user)
+
+ if errs != nil {
+
+ fmt.Println(errs) // output: Key: "User.Age" Error:Field validation for "Age" failed on the "lte" tag
+ // Key: "User.Addresses[0].City" Error:Field validation for "City" failed on the "required" tag
+ err := errs.(validator.ValidationErrors)["User.Addresses[0].City"]
+ fmt.Println(err.Field) // output: City
+ fmt.Println(err.Tag) // output: required
+ fmt.Println(err.Kind) // output: string
+ fmt.Println(err.Type) // output: string
+ fmt.Println(err.Param) // output:
+ fmt.Println(err.Value) // output:
+
+ // from here you can create your own error messages in whatever language you wish
+ return
+ }
+
+ // save user to database
+}
+
+func validateField() {
+ myEmail := "joeybloggs.gmail.com"
+
+ errs := validate.Field(myEmail, "required,email")
+
+ if errs != nil {
+ fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag
+ return
+ }
+
+ // email ok, move on
+}
+```
+
+Custom Field Type
+```go
+package main
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+
+ "gopkg.in/go-playground/validator.v8"
+)
+
+// DbBackedUser User struct
+type DbBackedUser struct {
+ Name sql.NullString `validate:"required"`
+ Age sql.NullInt64 `validate:"required"`
+}
+
+func main() {
+
+ config := &validator.Config{TagName: "validate"}
+
+ validate := validator.New(config)
+
+ // register all sql.Null* types to use the ValidateValuer CustomTypeFunc
+ validate.RegisterCustomTypeFunc(ValidateValuer, sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{})
+
+ x := DbBackedUser{Name: sql.NullString{String: "", Valid: true}, Age: sql.NullInt64{Int64: 0, Valid: false}}
+ errs := validate.Struct(x)
+
+	if errs != nil {
+ fmt.Printf("Errs:\n%+v\n", errs)
+ }
+}
+
+// ValidateValuer implements validator.CustomTypeFunc
+func ValidateValuer(field reflect.Value) interface{} {
+ if valuer, ok := field.Interface().(driver.Valuer); ok {
+ val, err := valuer.Value()
+ if err == nil {
+ return val
+ }
+ // handle the error how you want
+ }
+ return nil
+}
+```
+
+Struct Level Validation
+```go
+package main
+
+import (
+ "fmt"
+ "reflect"
+
+ "gopkg.in/go-playground/validator.v8"
+)
+
+// User contains user information
+type User struct {
+ FirstName string `json:"fname"`
+ LastName string `json:"lname"`
+ Age uint8 `validate:"gte=0,lte=130"`
+ Email string `validate:"required,email"`
+ FavouriteColor string `validate:"hexcolor|rgb|rgba"`
+ Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
+}
+
+// Address houses a user's address information
+type Address struct {
+ Street string `validate:"required"`
+ City string `validate:"required"`
+ Planet string `validate:"required"`
+ Phone string `validate:"required"`
+}
+
+var validate *validator.Validate
+
+func main() {
+
+ config := &validator.Config{TagName: "validate"}
+
+ validate = validator.New(config)
+ validate.RegisterStructValidation(UserStructLevelValidation, User{})
+
+ validateStruct()
+}
+
+// UserStructLevelValidation contains custom struct-level validations that don't always
+// make sense at the field-validation level. For example, this function validates that either
+// FirstName or LastName exists; this could have been done with a custom field validation, but
+// it would have had to be added to both fields, duplicating the logic and overhead. This way
+// it's only validated once.
+//
+// NOTE: you may ask why not just do this outside of validator. Doing it this way
+// hooks right into validator, and you can combine it with validation tags and still have a
+// common error output format.
+func UserStructLevelValidation(v *validator.Validate, structLevel *validator.StructLevel) {
+
+ user := structLevel.CurrentStruct.Interface().(User)
+
+ if len(user.FirstName) == 0 && len(user.LastName) == 0 {
+ structLevel.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "fnameorlname")
+ structLevel.ReportError(reflect.ValueOf(user.LastName), "LastName", "lname", "fnameorlname")
+ }
+
+	// plus you can do more, even with a different tag than "fnameorlname"
+}
+
+func validateStruct() {
+
+ address := &Address{
+ Street: "Eavesdown Docks",
+	Planet: "Persephone",
+ Phone: "none",
+ City: "Unknown",
+ }
+
+ user := &User{
+ FirstName: "",
+ LastName: "",
+ Age: 45,
+ Email: "Badger.Smith@gmail.com",
+ FavouriteColor: "#000",
+ Addresses: []*Address{address},
+ }
+
+ // returns nil or ValidationErrors ( map[string]*FieldError )
+ errs := validate.Struct(user)
+
+ if errs != nil {
+
+ fmt.Println(errs) // output: Key: 'User.LastName' Error:Field validation for 'LastName' failed on the 'fnameorlname' tag
+ // Key: 'User.FirstName' Error:Field validation for 'FirstName' failed on the 'fnameorlname' tag
+ err := errs.(validator.ValidationErrors)["User.FirstName"]
+ fmt.Println(err.Field) // output: FirstName
+ fmt.Println(err.Tag) // output: fnameorlname
+ fmt.Println(err.Kind) // output: string
+ fmt.Println(err.Type) // output: string
+ fmt.Println(err.Param) // output:
+ fmt.Println(err.Value) // output:
+
+ // from here you can create your own error messages in whatever language you wish
+ return
+ }
+
+ // save user to database
+}
+```
+
+Benchmarks
+------
+###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go version go1.5.3 darwin/amd64
+```
+go test -cpu=4 -bench=. -benchmem=true
+PASS
+BenchmarkFieldSuccess-4 10000000 167 ns/op 0 B/op 0 allocs/op
+BenchmarkFieldFailure-4 2000000 701 ns/op 432 B/op 4 allocs/op
+BenchmarkFieldDiveSuccess-4 500000 2937 ns/op 480 B/op 27 allocs/op
+BenchmarkFieldDiveFailure-4 500000 3536 ns/op 912 B/op 31 allocs/op
+BenchmarkFieldCustomTypeSuccess-4 5000000 341 ns/op 32 B/op 2 allocs/op
+BenchmarkFieldCustomTypeFailure-4 2000000 679 ns/op 432 B/op 4 allocs/op
+BenchmarkFieldOrTagSuccess-4 1000000 1157 ns/op 16 B/op 1 allocs/op
+BenchmarkFieldOrTagFailure-4 1000000 1109 ns/op 464 B/op 6 allocs/op
+BenchmarkStructLevelValidationSuccess-4 2000000 694 ns/op 176 B/op 6 allocs/op
+BenchmarkStructLevelValidationFailure-4 1000000 1311 ns/op 640 B/op 11 allocs/op
+BenchmarkStructSimpleCustomTypeSuccess-4 2000000 894 ns/op 80 B/op 5 allocs/op
+BenchmarkStructSimpleCustomTypeFailure-4 1000000 1496 ns/op 688 B/op 11 allocs/op
+BenchmarkStructPartialSuccess-4 1000000 1229 ns/op 384 B/op 10 allocs/op
+BenchmarkStructPartialFailure-4 1000000 1838 ns/op 832 B/op 15 allocs/op
+BenchmarkStructExceptSuccess-4 2000000 961 ns/op 336 B/op 7 allocs/op
+BenchmarkStructExceptFailure-4 1000000 1218 ns/op 384 B/op 10 allocs/op
+BenchmarkStructSimpleCrossFieldSuccess-4 2000000 954 ns/op 128 B/op 6 allocs/op
+BenchmarkStructSimpleCrossFieldFailure-4 1000000 1569 ns/op 592 B/op 11 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldSuccess-4 1000000 1588 ns/op 192 B/op 10 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldFailure-4 1000000 2217 ns/op 656 B/op 15 allocs/op
+BenchmarkStructSimpleSuccess-4 2000000 925 ns/op 48 B/op 3 allocs/op
+BenchmarkStructSimpleFailure-4 1000000 1650 ns/op 688 B/op 11 allocs/op
+BenchmarkStructSimpleSuccessParallel-4 5000000 261 ns/op 48 B/op 3 allocs/op
+BenchmarkStructSimpleFailureParallel-4 2000000 758 ns/op 688 B/op 11 allocs/op
+BenchmarkStructComplexSuccess-4 300000 5868 ns/op 544 B/op 32 allocs/op
+BenchmarkStructComplexFailure-4 200000 10767 ns/op 3912 B/op 77 allocs/op
+BenchmarkStructComplexSuccessParallel-4 1000000 1559 ns/op 544 B/op 32 allocs/op
+BenchmarkStructComplexFailureParallel-4 500000 3747 ns/op 3912 B/op 77 allocs/op
+```
+
+Complementary Software
+----------------------
+
+Here is a list of software that complements this library, either pre- or post-validation.
+
+* [Gorilla Schema](https://github.com/gorilla/schema) - Package gorilla/schema fills a struct with form values.
+* [Conform](https://github.com/leebenson/conform) - Trims, sanitizes & scrubs data based on struct tags.
+
+How to Contribute
+------
+
+There will always be a development branch for each version, i.e. `v1-development`. In order to contribute,
+please make your pull requests against those branches.
+
+If the changes being proposed or requested are breaking changes, please create an issue for discussion,
+or create a pull request against the highest development branch. For example, this package has
+v1 and v1-development branches; there will also be a v2-development branch even though v2 doesn't exist yet.
+
+I strongly encourage everyone who creates a custom validation function to contribute it and
+help make this package even better.
+
+License
+------
+Distributed under the MIT License; please see the license file for more details.
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/baked_in.go b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/baked_in.go
new file mode 100644
index 0000000..3dcb0de
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/baked_in.go
@@ -0,0 +1,1418 @@
+package validator
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// bakedInAliasValidators is a default mapping of a single validation tag that
+// defines a common or complex set of validation(s) to simplify
+// adding validation to structs, i.e. set key "_ageok" and the tags
+// are "gt=0,lte=130", or key "_preferredname" and tags "omitempty,gt=0,lte=60".
+var bakedInAliasValidators = map[string]string{
+ "iscolor": "hexcolor|rgb|rgba|hsl|hsla",
+}
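+
+// An alias can also be registered at runtime on a *Validate instance, e.g.
+// (a sketch, assuming this version's RegisterAliasValidation method):
+//
+//	validate.RegisterAliasValidation("_ageok", "gt=0,lte=130")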
+
+// bakedInValidators is the default map of ValidationFunc.
+// You can add, remove or even replace items to suit your needs,
+// or disregard it and use your own map if so desired.
+var bakedInValidators = map[string]Func{
+ "required": HasValue,
+ "len": HasLengthOf,
+ "min": HasMinOf,
+ "max": HasMaxOf,
+ "eq": IsEq,
+ "ne": IsNe,
+ "lt": IsLt,
+ "lte": IsLte,
+ "gt": IsGt,
+ "gte": IsGte,
+ "eqfield": IsEqField,
+ "eqcsfield": IsEqCrossStructField,
+ "necsfield": IsNeCrossStructField,
+ "gtcsfield": IsGtCrossStructField,
+ "gtecsfield": IsGteCrossStructField,
+ "ltcsfield": IsLtCrossStructField,
+ "ltecsfield": IsLteCrossStructField,
+ "nefield": IsNeField,
+ "gtefield": IsGteField,
+ "gtfield": IsGtField,
+ "ltefield": IsLteField,
+ "ltfield": IsLtField,
+ "alpha": IsAlpha,
+ "alphanum": IsAlphanum,
+ "numeric": IsNumeric,
+ "number": IsNumber,
+ "hexadecimal": IsHexadecimal,
+ "hexcolor": IsHEXColor,
+ "rgb": IsRGB,
+ "rgba": IsRGBA,
+ "hsl": IsHSL,
+ "hsla": IsHSLA,
+ "email": IsEmail,
+ "url": IsURL,
+ "uri": IsURI,
+ "base64": IsBase64,
+ "contains": Contains,
+ "containsany": ContainsAny,
+ "containsrune": ContainsRune,
+ "excludes": Excludes,
+ "excludesall": ExcludesAll,
+ "excludesrune": ExcludesRune,
+ "isbn": IsISBN,
+ "isbn10": IsISBN10,
+ "isbn13": IsISBN13,
+ "uuid": IsUUID,
+ "uuid3": IsUUID3,
+ "uuid4": IsUUID4,
+ "uuid5": IsUUID5,
+ "ascii": IsASCII,
+ "printascii": IsPrintableASCII,
+ "multibyte": HasMultiByteCharacter,
+ "datauri": IsDataURI,
+ "latitude": IsLatitude,
+ "longitude": IsLongitude,
+ "ssn": IsSSN,
+ "ipv4": IsIPv4,
+ "ipv6": IsIPv6,
+ "ip": IsIP,
+ "cidrv4": IsCIDRv4,
+ "cidrv6": IsCIDRv6,
+ "cidr": IsCIDR,
+ "tcp4_addr": IsTCP4AddrResolvable,
+ "tcp6_addr": IsTCP6AddrResolvable,
+ "tcp_addr": IsTCPAddrResolvable,
+ "udp4_addr": IsUDP4AddrResolvable,
+ "udp6_addr": IsUDP6AddrResolvable,
+ "udp_addr": IsUDPAddrResolvable,
+ "ip4_addr": IsIP4AddrResolvable,
+ "ip6_addr": IsIP6AddrResolvable,
+ "ip_addr": IsIPAddrResolvable,
+ "unix_addr": IsUnixAddrResolvable,
+ "mac": IsMAC,
+}
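+
+// A custom validator with the same Func signature used in this file can be
+// added at runtime, e.g. (a sketch, assuming this version's
+// RegisterValidation method):
+//
+//	validate.RegisterValidation("is-awesome", func(v *Validate, topStruct reflect.Value,
+//		currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type,
+//		fieldKind reflect.Kind, param string) bool {
+//		return field.String() == "awesome"
+//	})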
+
+// IsMAC is the validation function for validating if the field's value is a valid MAC address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsMAC(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ _, err := net.ParseMAC(field.String())
+ return err == nil
+}
+
+// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsCIDRv4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ ip, _, err := net.ParseCIDR(field.String())
+
+ return err == nil && ip.To4() != nil
+}
+
+// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsCIDRv6(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ ip, _, err := net.ParseCIDR(field.String())
+
+ return err == nil && ip.To4() == nil
+}
+
+// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsCIDR(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ _, _, err := net.ParseCIDR(field.String())
+
+ return err == nil
+}
+
+// IsIPv4 is the validation function for validating if a value is a valid v4 IP address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsIPv4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ ip := net.ParseIP(field.String())
+
+ return ip != nil && ip.To4() != nil
+}
+
+// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsIPv6(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ ip := net.ParseIP(field.String())
+
+ return ip != nil && ip.To4() == nil
+}
+
+// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsIP(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ ip := net.ParseIP(field.String())
+
+ return ip != nil
+}
+
+// IsSSN is the validation function for validating if the field's value is a valid SSN.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsSSN(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if field.Len() != 11 {
+ return false
+ }
+
+ return sSNRegex.MatchString(field.String())
+}
+
+// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLongitude(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return longitudeRegex.MatchString(field.String())
+}
+
+// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLatitude(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return latitudeRegex.MatchString(field.String())
+}
+
+// IsDataURI is the validation function for validating if the field's value is a valid data URI.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsDataURI(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ uri := strings.SplitN(field.String(), ",", 2)
+
+ if len(uri) != 2 {
+ return false
+ }
+
+ if !dataURIRegex.MatchString(uri[0]) {
+ return false
+ }
+
+ fld := reflect.ValueOf(uri[1])
+
+ return IsBase64(v, topStruct, currentStructOrField, fld, fld.Type(), fld.Kind(), param)
+}
+
+// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func HasMultiByteCharacter(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if field.Len() == 0 {
+ return true
+ }
+
+ return multibyteRegex.MatchString(field.String())
+}
+
+// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsPrintableASCII(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return printableASCIIRegex.MatchString(field.String())
+}
+
+// IsASCII is the validation function for validating if the field's value is a valid ASCII character.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsASCII(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return aSCIIRegex.MatchString(field.String())
+}
+
+// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUUID5(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return uUID5Regex.MatchString(field.String())
+}
+
+// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUUID4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return uUID4Regex.MatchString(field.String())
+}
+
+// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUUID3(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return uUID3Regex.MatchString(field.String())
+}
+
+// IsUUID is the validation function for validating if the field's value is a valid UUID of any version.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUUID(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return uUIDRegex.MatchString(field.String())
+}
+
+// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsISBN(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return IsISBN10(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) || IsISBN13(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param)
+}
+
+// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsISBN13(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ s := strings.Replace(strings.Replace(field.String(), "-", "", 4), " ", "", 4)
+
+ if !iSBN13Regex.MatchString(s) {
+ return false
+ }
+
+ var checksum int32
+ var i int32
+
+ factor := []int32{1, 3}
+
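+	// ISBN-13 check: digits are weighted alternately 1 and 3; the check digit
+	// s[12] must bring the weighted sum up to a multiple of 10.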
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(s[i]-'0')
+ }
+
+ if (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 {
+ return true
+ }
+
+ return false
+}
+
+// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsISBN10(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ s := strings.Replace(strings.Replace(field.String(), "-", "", 3), " ", "", 3)
+
+ if !iSBN10Regex.MatchString(s) {
+ return false
+ }
+
+ var checksum int32
+ var i int32
+
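+	// ISBN-10 check: a position-weighted sum of the digits (weights 1..9, plus
+	// 10 for the check digit, where 'X' counts as ten) must be divisible by 11.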
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(s[i]-'0')
+ }
+
+ if s[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(s[9]-'0')
+ }
+
+ if checksum%11 == 0 {
+ return true
+ }
+
+ return false
+}
+
+// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func ExcludesRune(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return !ContainsRune(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param)
+}
+
+// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func ExcludesAll(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return !ContainsAny(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param)
+}
+
+// Excludes is the validation function for validating that the field's value does not contain the text specified within the param.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func Excludes(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return !Contains(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param)
+}
+
+// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func ContainsRune(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ r, _ := utf8.DecodeRuneInString(param)
+
+ return strings.ContainsRune(field.String(), r)
+}
+
+// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func ContainsAny(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return strings.ContainsAny(field.String(), param)
+}
+
+// Contains is the validation function for validating that the field's value contains the text specified within the param.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func Contains(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return strings.Contains(field.String(), param)
+}
+
+// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsNeField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param)
+
+ if !ok || currentKind != fieldKind {
+ return true
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() != currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() != currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() != currentField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) != int64(currentField.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return true
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return !fieldTime.Equal(t)
+ }
+
+ }
+
+ // default reflect.String:
+ return field.String() != currentField.String()
+}
+
+// IsNe is the validation function for validating that the field's value does not equal the provided param value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsNe(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return !IsEq(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param)
+}
+
+// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLteCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ topField, topKind, ok := v.GetStructFieldOK(topStruct, param)
+ if !ok || topKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() <= topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() <= topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() <= topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) <= int64(topField.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.Before(topTime) || fieldTime.Equal(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() <= topField.String()
+}
+
+// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLtCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ topField, topKind, ok := v.GetStructFieldOK(topStruct, param)
+ if !ok || topKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() < topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() < topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() < topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) < int64(topField.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.Before(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() < topField.String()
+}
+
+// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsGteCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ topField, topKind, ok := v.GetStructFieldOK(topStruct, param)
+ if !ok || topKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() >= topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() >= topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() >= topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) >= int64(topField.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.After(topTime) || fieldTime.Equal(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() >= topField.String()
+}
+
+// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsGtCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ topField, topKind, ok := v.GetStructFieldOK(topStruct, param)
+ if !ok || topKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() > topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() > topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() > topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) > int64(topField.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.After(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() > topField.String()
+}
+
+// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsNeCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ topField, currentKind, ok := v.GetStructFieldOK(topStruct, param)
+ if !ok || currentKind != fieldKind {
+ return true
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return topField.Int() != field.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return topField.Uint() != field.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return topField.Float() != field.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(topField.Len()) != int64(field.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return true
+ }
+
+ if fieldType == timeType {
+
+ t := field.Interface().(time.Time)
+ fieldTime := topField.Interface().(time.Time)
+
+ return !fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String:
+ return topField.String() != field.String()
+}
+
+// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsEqCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ topField, topKind, ok := v.GetStructFieldOK(topStruct, param)
+ if !ok || topKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return topField.Int() == field.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return topField.Uint() == field.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return topField.Float() == field.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(topField.Len()) == int64(field.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := field.Interface().(time.Time)
+ fieldTime := topField.Interface().(time.Time)
+
+ return fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String:
+ return topField.String() == field.String()
+}
+
+// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsEqField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param)
+ if !ok || currentKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() == currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() == currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() == currentField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) == int64(currentField.Len())
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Equal(t)
+ }
+
+ }
+
+ // default reflect.String:
+ return field.String() == currentField.String()
+}
+
+// IsEq is the validation function for validating if the current field's value is equal to the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsEq(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+ return field.String() == param
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) == p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asInt(param)
+
+ return field.Int() == p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() == p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() == p
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsBase64 is the validation function for validating if the current field's value is a valid base 64.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsBase64(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return base64Regex.MatchString(field.String())
+}
+
+// IsURI is the validation function for validating if the current field's value is a valid URI.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsURI(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+
+ s := field.String()
+
+ // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
+ // emulate browser and strip the '#' suffix prior to validation. see issue-#237
+ if i := strings.Index(s, "#"); i > -1 {
+ s = s[:i]
+ }
+
+ if s == blank {
+ return false
+ }
+
+ _, err := url.ParseRequestURI(s)
+
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsURL is the validation function for validating if the current field's value is a valid URL.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsURL(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+
+ var i int
+ s := field.String()
+
+ // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
+ // emulate browser and strip the '#' suffix prior to validation. see issue-#237
+ if i = strings.Index(s, "#"); i > -1 {
+ s = s[:i]
+ }
+
+ if s == blank {
+ return false
+ }
+
+ url, err := url.ParseRequestURI(s)
+
+ if err != nil || url.Scheme == blank {
+ return false
+ }
+
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsEmail is the validation function for validating if the current field's value is a valid email address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsEmail(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return emailRegex.MatchString(field.String())
+}
+
+// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsHSLA(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return hslaRegex.MatchString(field.String())
+}
+
+// IsHSL is the validation function for validating if the current field's value is a valid HSL color.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsHSL(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return hslRegex.MatchString(field.String())
+}
+
+// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsRGBA(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return rgbaRegex.MatchString(field.String())
+}
+
+// IsRGB is the validation function for validating if the current field's value is a valid RGB color.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsRGB(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return rgbRegex.MatchString(field.String())
+}
+
+// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsHEXColor(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return hexcolorRegex.MatchString(field.String())
+}
+
+// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsHexadecimal(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return hexadecimalRegex.MatchString(field.String())
+}
+
+// IsNumber is the validation function for validating if the current field's value is a valid number.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsNumber(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return numberRegex.MatchString(field.String())
+}
+
+// IsNumeric is the validation function for validating if the current field's value is a valid numeric value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsNumeric(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return numericRegex.MatchString(field.String())
+}
+
+// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsAlphanum(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return alphaNumericRegex.MatchString(field.String())
+}
+
+// IsAlpha is the validation function for validating if the current field's value is a valid alpha value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsAlpha(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return alphaRegex.MatchString(field.String())
+}
+
+// HasValue is the validation function for validating if the current field's value is not the default static value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func HasValue(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ return !field.IsNil()
+ default:
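+		// non-nilable kinds: compare against the zero value of the field's type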
+ return field.IsValid() && field.Interface() != reflect.Zero(fieldType).Interface()
+ }
+}
+
+// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsGteField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param)
+ if !ok || currentKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() >= currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() >= currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() >= currentField.Float()
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.After(t) || fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) >= len(currentField.String())
+}
+
+// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsGtField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param)
+ if !ok || currentKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() > currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() > currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() > currentField.Float()
+
+ case reflect.Struct:
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.After(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) > len(currentField.String())
+}
+
+// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsGte(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) >= p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) >= p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asInt(param)
+
+ return field.Int() >= p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() >= p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() >= p
+
+ case reflect.Struct:
+
+ if fieldType == timeType || fieldType == timePtrType {
+
+ now := time.Now().UTC()
+ t := field.Interface().(time.Time)
+
+ return t.After(now) || t.Equal(now)
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsGt is the validation function for validating if the current field's value is greater than the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsGt(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) > p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) > p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asInt(param)
+
+ return field.Int() > p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() > p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() > p
+ case reflect.Struct:
+
+ if field.Type() == timeType || field.Type() == timePtrType {
+
+ return field.Interface().(time.Time).After(time.Now().UTC())
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func HasLengthOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) == p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) == p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asInt(param)
+
+ return field.Int() == p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() == p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() == p
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func HasMinOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ return IsGte(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param)
+}
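+
+// For illustration only (not part of the library): a user-registered
+// validation function could delegate to IsGte like so:
+//
+//	validate.RegisterValidation("minlen", func(v *Validate, top, cur, field reflect.Value, typ reflect.Type, kind reflect.Kind, param string) bool {
+//		return IsGte(v, top, cur, field, typ, kind, param)
+//	})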
+
+// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLteField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param)
+ if !ok || currentKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() <= currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() <= currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() <= currentField.Float()
+
+ case reflect.Struct:
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Before(t) || fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) <= len(currentField.String())
+}
+
+// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLtField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param)
+ if !ok || currentKind != fieldKind {
+ return false
+ }
+
+ switch fieldKind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() < currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() < currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() < currentField.Float()
+
+ case reflect.Struct:
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Before(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) < len(currentField.String())
+}
+
+// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLte(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) <= p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) <= p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asInt(param)
+
+ return field.Int() <= p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() <= p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() <= p
+
+ case reflect.Struct:
+
+ if fieldType == timeType || fieldType == timePtrType {
+
+ now := time.Now().UTC()
+ t := field.Interface().(time.Time)
+
+ return t.Before(now) || t.Equal(now)
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsLt is the validation function for validating if the current field's value is less than the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsLt(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ switch fieldKind {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) < p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) < p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asInt(param)
+
+ return field.Int() < p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() < p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() < p
+
+ case reflect.Struct:
+
+ if field.Type() == timeType || field.Type() == timePtrType {
+
+ return field.Interface().(time.Time).Before(time.Now().UTC())
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func HasMaxOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ return IsLte(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param)
+}
+
+// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsTCP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp4", field.String())
+ return err == nil
+}
+
+// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsTCP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp6", field.String())
+ return err == nil
+}
+
+// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsTCPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) &&
+ !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp", field.String())
+ return err == nil
+}
+
+// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUDP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp4", field.String())
+ return err == nil
+}
+
+// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUDP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp6", field.String())
+ return err == nil
+}
+
+// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUDPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) &&
+ !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp", field.String())
+ return err == nil
+}
+
+// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsIP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !IsIPv4(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip4", field.String())
+ return err == nil
+}
+
+// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsIP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !IsIPv6(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip6", field.String())
+ return err == nil
+}
+
+// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsIPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if !IsIP(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip", field.String())
+ return err == nil
+}
+
+// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func IsUnixAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ _, err := net.ResolveUnixAddr("unix", field.String())
+ return err == nil
+}
+
+func isIP4Addr(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ val := field.String()
+
+ if idx := strings.LastIndex(val, ":"); idx != -1 {
+ val = val[0:idx]
+ }
+
+ if !IsIPv4(v, topStruct, currentStructOrField, reflect.ValueOf(val), fieldType, fieldKind, param) {
+ return false
+ }
+
+ return true
+}
+
+func isIP6Addr(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+ val := field.String()
+
+ if idx := strings.LastIndex(val, ":"); idx != -1 {
+ if idx != 0 && val[idx-1:idx] == "]" {
+ val = val[1 : idx-1]
+ }
+ }
+
+ if !IsIPv6(v, topStruct, currentStructOrField, reflect.ValueOf(val), fieldType, fieldKind, param) {
+ return false
+ }
+
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/cache.go b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/cache.go
new file mode 100644
index 0000000..289226e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/cache.go
@@ -0,0 +1,71 @@
+package validator
+
+import (
+ "reflect"
+ "sync"
+)
+
+type cachedField struct {
+ Idx int
+ Name string
+ AltName string
+ CachedTag *cachedTag
+}
+
+type cachedStruct struct {
+ Name string
+ fields map[int]cachedField
+}
+
+type structCacheMap struct {
+ lock sync.RWMutex
+ m map[reflect.Type]*cachedStruct
+}
+
+func (s *structCacheMap) Get(key reflect.Type) (*cachedStruct, bool) {
+ s.lock.RLock()
+ value, ok := s.m[key]
+ s.lock.RUnlock()
+ return value, ok
+}
+
+func (s *structCacheMap) Set(key reflect.Type, value *cachedStruct) {
+ s.lock.Lock()
+ s.m[key] = value
+ s.lock.Unlock()
+}
+
+type cachedTag struct {
+ tag string
+ isOmitEmpty bool
+ isNoStructLevel bool
+ isStructOnly bool
+ diveTag string
+ tags []*tagVals
+}
+
+type tagVals struct {
+ tagVals [][]string
+ isOrVal bool
+ isAlias bool
+ tag string
+}
+
+type tagCacheMap struct {
+ lock sync.RWMutex
+ m map[string]*cachedTag
+}
+
+func (s *tagCacheMap) Get(key string) (*cachedTag, bool) {
+ s.lock.RLock()
+ value, ok := s.m[key]
+ s.lock.RUnlock()
+
+ return value, ok
+}
+
+func (s *tagCacheMap) Set(key string, value *cachedTag) {
+ s.lock.Lock()
+ s.m[key] = value
+ s.lock.Unlock()
+}
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/doc.go b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/doc.go
new file mode 100644
index 0000000..c351a61
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/doc.go
@@ -0,0 +1,852 @@
+/*
+Package validator implements value validations for structs and individual fields
+based on tags.
+
+It can also handle Cross-Field and Cross-Struct validation for nested structs
+and has the ability to dive into arrays and maps of any type.
+
+Why not a better error message?
+Because this library intends for you to handle your own error messages.
+
+Why should I handle my own errors?
+Many reasons. We built an internationalized application and needed to know the
+field, and what validation failed so we could provide a localized error.
+
+ if fieldErr.Field == "Name" {
+     switch fieldErr.ErrorTag {
+     case "required":
+         return "Translated string based on field + error"
+     default:
+         return "Translated string based on field"
+     }
+ }
+
+
+Validation Functions Return Type error
+
+Doing things this way is actually how the standard library does it as well;
+see the os.Open function here:
+
+ https://golang.org/pkg/os/#Open
+
+The authors return the "error" type to avoid the issue discussed in the following,
+where err is always != nil:
+
+ http://stackoverflow.com/a/29138676/3158232
+ https://github.com/go-playground/validator/issues/134
+
+Validator only returns nil or ValidationErrors as type error; so, in your code
+all you need to do is check if the error returned is not nil, and if it's not,
+type assert it to ValidationErrors like so: err.(validator.ValidationErrors).
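+
+A minimal sketch (the user value and the validate instance are assumed to
+already exist):
+
+ errs := validate.Struct(user)
+ if errs != nil {
+     for _, e := range errs.(validator.ValidationErrors) {
+         fmt.Println(e.Field, e.Tag)
+     }
+ }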
+
+Custom Functions
+
+Custom functions can be added. Example:
+
+ // Structure
+ func customFunc(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+
+ if whatever {
+ return false
+ }
+
+ return true
+ }
+
+ validate.RegisterValidation("custom tag name", customFunc)
+ // NOTES: using the same tag name as an existing function
+ // will overwrite the existing one
+
+Cross-Field Validation
+
+Cross-Field Validation can be done via the following tags:
+ - eqfield
+ - nefield
+ - gtfield
+ - gtefield
+ - ltfield
+ - ltefield
+ - eqcsfield
+ - necsfield
+ - gtcsfield
+ - gtecsfield
+ - ltcsfield
+ - ltecsfield
+
+If, however, some custom cross-field validation is required, it can be done
+using a custom validation.
+
+Why not just have cross-fields validation tags (i.e. only eqcsfield and not
+eqfield)?
+
+The reason is efficiency. If you want to check a field within the same struct
+"eqfield" only has to find the field on the same struct (1 level). But, if we
+used "eqcsfield" it could be multiple levels down. Example:
+
+ type Inner struct {
+ StartDate time.Time
+ }
+
+ type Outer struct {
+ InnerStructField *Inner
+ CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"`
+ }
+
+ now := time.Now()
+
+ inner := &Inner{
+ StartDate: now,
+ }
+
+ outer := &Outer{
+ InnerStructField: inner,
+ CreatedAt: now,
+ }
+
+ errs := validate.Struct(outer)
+
+ // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed
+ // into the function
+ // when calling validate.FieldWithValue(val, field, tag) val will be
+ // whatever you pass, struct, field...
+ // when calling validate.Field(field, tag) val will be nil
+
+Multiple Validators
+
+Multiple validators on a field will process in the order defined. Example:
+
+ type Test struct {
+ Field `validate:"max=10,min=1"`
+ }
+
+ // max will be checked then min
+
+Bad Validator definitions are not handled by the library. Example:
+
+ type Test struct {
+ Field `validate:"min=10,max=0"`
+ }
+
+ // this definition of min max will never succeed
+
+Using Validator Tags
+
+Baked In Cross-Field validation only compares fields on the same struct.
+If Cross-Field + Cross-Struct validation is needed you should implement your
+own custom validator.
+
+Comma (",") is the default separator of validation tags. If you wish to
+have a comma included within the parameter (i.e. excludesall=,) you will need to
+use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma,
+so the above will become excludesall=0x2C.
+
+ type Test struct {
+ Field `validate:"excludesall=,"` // BAD! Do not include a comma.
+ Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
+ }
+
+Pipe ("|") is the default separator of validation tags. If you wish to
+have a pipe included within the parameter i.e. excludesall=| you will need to
+use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
+so the above will become excludesall=0x7C
+
+ type Test struct {
+ Field `validate:"excludesall=|"` // BAD! Do not include a a pipe!
+ Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
+ }
+
+
+Baked In Validators and Tags
+
+Here is a list of the current built in validators:
+
+
+Skip Field
+
+Tells the validation to skip this struct field; this is particularly
+handy in ignoring embedded structs from being validated.
+ Usage: -
+
+
+Or Operator
+
+This is the 'or' operator allowing multiple validators to be used and
+accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba
+colors to be accepted. This can also be combined with 'and', for example
+(Usage: omitempty,rgb|rgba)
+
+ Usage: |
+
+StructOnly
+
+When a field that is a nested struct is encountered, and contains this flag,
+any validation on the nested struct will be run, but none of the nested
+struct fields will be validated. This is useful if inside of your program
+you know the struct will be valid, but need to verify it has been assigned.
+NOTE: only "required" and "omitempty" can be used on a struct itself.
+
+ Usage: structonly
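+
+A minimal sketch (the Address type here is assumed):
+
+ type User struct {
+     Address Address `validate:"structonly"`
+ }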
+
+NoStructLevel
+
+Same as structonly tag except that any struct level validations will not run.
+
+ Usage: nostructlevel
+
+Exists
+
+Is a special tag without a validation function attached. It is used when a field
+is a Pointer, Interface or Invalid and you wish to validate that it exists.
+Example: to ensure a bool exists, define the bool as a pointer and use exists;
+it will ensure there is a value. You couldn't use required, as it would fail
+when the bool was false. exists will fail if the value is a Pointer, Interface
+or Invalid and is nil.
+
+ Usage: exists
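+
+A minimal sketch; note that required would reject a false value, while exists
+accepts it:
+
+ type Settings struct {
+     Enabled *bool `validate:"exists"`
+ }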
+
+Omit Empty
+
+Allows conditional validation: if a field is not set with
+a value (determined by the "required" validator) then other validation
+such as min or max won't run, but if a value is set validation will run.
+
+ Usage: omitempty
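+
+A minimal sketch: Nickname may be left empty, but if set it must be between
+3 and 32 characters:
+
+ type User struct {
+     Nickname string `validate:"omitempty,min=3,max=32"`
+ }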
+
+Dive
+
+This tells the validator to dive into a slice, array or map and validate that
+level of the slice, array or map with the validation tags that follow.
+Multidimensional nesting is also supported, each level you wish to dive will
+require another dive tag.
+
+ Usage: dive
+
+Example #1
+
+ [][]string with validation tag "gt=0,dive,len=1,dive,required"
+ // gt=0 will be applied to []
+ // len=1 will be applied to []string
+ // required will be applied to string
+
+Example #2
+
+ [][]string with validation tag "gt=0,dive,dive,required"
+ // gt=0 will be applied to []
+ // []string will be spared validation
+ // required will be applied to string
+
+Required
+
+This validates that the value is not the data type's default zero value.
+For numbers, ensures value is not zero. For strings, ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required
+
+Length
+
+For numbers, len will ensure that the value is
+equal to the parameter given. For strings, it checks that
+the string length is exactly that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+ Usage: len=10
+
+Maximum
+
+For numbers, max will ensure that the value is
+less than or equal to the parameter given. For strings, it checks
+that the string length is at most that number of characters. For
+slices, arrays, and maps, validates the number of items.
+
+ Usage: max=10
+
+Minimum
+
+For numbers, min will ensure that the value is
+greater than or equal to the parameter given. For strings, it checks that
+the string length is at least that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+ Usage: min=10
+
+Equals
+
+For strings & numbers, eq will ensure that the value is
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+ Usage: eq=10
+
+Not Equal
+
+For strings & numbers, eq will ensure that the value is not
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+ Usage: eq=10
+
+Greater Than
+
+For numbers, this will ensure that the value is greater than the
+parameter given. For strings, it checks that the string length
+is greater than that number of characters. For slices, arrays
+and maps it validates the number of items.
+
+Example #1
+
+ Usage: gt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than time.Now.UTC().
+
+ Usage: gt
+
+Greater Than or Equal
+
+Same as 'min' above. Kept both to make terminology with 'len' easier.
+
+
+Example #1
+
+ Usage: gte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than or equal to time.Now.UTC().
+
+ Usage: gte
+
+Less Than
+
+For numbers, this will ensure that the value is less than the parameter given.
+For strings, it checks that the string length is less than that number of
+characters. For slices, arrays, and maps it validates the number of items.
+
+Example #1
+
+ Usage: lt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than time.Now.UTC().
+
+ Usage: lt
+
+Less Than or Equal
+
+Same as 'max' above. Kept both to make terminology with 'len' easier.
+
+Example #1
+
+ Usage: lte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than or equal to time.Now.UTC().
+
+ Usage: lte
+
+Field Equals Another Field
+
+This will validate the field value against another field's value either within
+a struct or passed in field.
+
+Example #1:
+
+ // Validation on Password field using:
+ Usage: eqfield=ConfirmPassword
+
+Example #2:
+
+ // Validating by field:
+ validate.FieldWithValue(password, confirmpassword, "eqfield")
+
+Field Equals Another Field (relative)
+
+This does the same as eqfield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: eqcsfield=InnerStructField.Field
+
+Field Does Not Equal Another Field
+
+This will validate the field value against another field's value either within
+a struct or passed in field.
+
+Examples:
+
+ // Confirm two colors are not the same:
+ //
+ // Validation on Color field:
+ Usage: nefield=Color2
+
+ // Validating by field:
+ validate.FieldWithValue(color1, color2, "nefield")
+
+Field Does Not Equal Another Field (relative)
+
+This does the same as nefield except that it validates the field provided
+relative to the top level struct.
+
+ Usage: necsfield=InnerStructField.Field
+
+Field Greater Than Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value either within a struct or passed in field.
+The usage examples below validate a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: gtfield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.FieldWithValue(start, end, "gtfield")
+
+
+Field Greater Than Another Relative Field
+
+This does the same as gtfield except that it validates the field provided
+relative to the top level struct.
+
+ Usage: gtcsfield=InnerStructField.Field
+
+Field Greater Than or Equal To Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value either within a struct or passed in field.
+The usage examples below validate a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: gtefield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.FieldWithValue(start, end, "gtefield")
+
+Field Greater Than or Equal To Another Relative Field
+
+This does the same as gtefield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: gtecsfield=InnerStructField.Field
+
+Less Than Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value either within a struct or passed in field.
+The usage examples below validate a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: ltfield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.FieldWithValue(start, end, "ltfield")
+
+Less Than Another Relative Field
+
+This does the same as ltfield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: ltcsfield=InnerStructField.Field
+
+Less Than or Equal To Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value either within a struct or passed in field.
+The usage examples below validate a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: ltefield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.FieldWithValue(start, end, "ltefield")
+
+Less Than or Equal To Another Relative Field
+
+This does the same as ltefield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: ltecsfield=InnerStructField.Field
+
+Alpha Only
+
+This validates that a string value contains alpha characters only.
+
+ Usage: alpha
+
+Alphanumeric
+
+This validates that a string value contains alphanumeric characters only.
+
+ Usage: alphanum
+
+Numeric
+
+This validates that a string value contains a basic numeric value;
+basic excludes exponents etc.
+
+ Usage: numeric
+
+Hexadecimal String
+
+This validates that a string value contains a valid hexadecimal.
+
+ Usage: hexadecimal
+
+Hexcolor String
+
+This validates that a string value contains a valid hex color, including
+the hash (#).
+
+ Usage: hexcolor
+
+RGB String
+
+This validates that a string value contains a valid rgb color.
+
+ Usage: rgb
+
+RGBA String
+
+This validates that a string value contains a valid rgba color.
+
+ Usage: rgba
+
+HSL String
+
+This validates that a string value contains a valid hsl color.
+
+ Usage: hsl
+
+HSLA String
+
+This validates that a string value contains a valid hsla color.
+
+ Usage: hsla
+
+E-mail String
+
+This validates that a string value contains a valid email.
+This may not conform to all possibilities of any RFC standard, but neither
+does any email provider accept all possibilities.
+
+ Usage: email
+
+URL String
+
+This validates that a string value contains a valid url.
+This will accept any url the golang request uri accepts but must contain
+a scheme, for example http:// or rtmp://.
+
+ Usage: url
+
+URI String
+
+This validates that a string value contains a valid uri.
+This will accept any uri the golang request uri accepts.
+
+ Usage: uri
+
+Base64 String
+
+This validates that a string value contains a valid base64 value.
+Although an empty string is valid base64, this will report an empty string
+as an error; if you wish to accept an empty string as valid you can use
+this with the omitempty tag.
+
+ Usage: base64
+
+Contains
+
+This validates that a string value contains the substring value.
+
+ Usage: contains=@
+
+Contains Any
+
+This validates that a string value contains any Unicode code points
+in the substring value.
+
+ Usage: containsany=!@#?
+
+Contains Rune
+
+This validates that a string value contains the supplied rune value.
+
+ Usage: containsrune=@
+
+Excludes
+
+This validates that a string value does not contain the substring value.
+
+ Usage: excludes=@
+
+Excludes All
+
+This validates that a string value does not contain any Unicode code
+points in the substring value.
+
+ Usage: excludesall=!@#?
+
+Excludes Rune
+
+This validates that a string value does not contain the supplied rune value.
+
+ Usage: excludesrune=@
+
+International Standard Book Number
+
+This validates that a string value contains a valid isbn10 or isbn13 value.
+
+ Usage: isbn
+
+International Standard Book Number 10
+
+This validates that a string value contains a valid isbn10 value.
+
+ Usage: isbn10
+
+International Standard Book Number 13
+
+This validates that a string value contains a valid isbn13 value.
+
+ Usage: isbn13
+
+
+Universally Unique Identifier UUID
+
+This validates that a string value contains a valid UUID.
+
+ Usage: uuid
+
+Universally Unique Identifier UUID v3
+
+This validates that a string value contains a valid version 3 UUID.
+
+ Usage: uuid3
+
+Universally Unique Identifier UUID v4
+
+This validates that a string value contains a valid version 4 UUID.
+
+ Usage: uuid4
+
+Universally Unique Identifier UUID v5
+
+This validates that a string value contains a valid version 5 UUID.
+
+ Usage: uuid5
+
+ASCII
+
+This validates that a string value contains only ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: ascii
+
+Printable ASCII
+
+This validates that a string value contains only printable ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: printascii
+
+Multi-Byte Characters
+
+This validates that a string value contains one or more multibyte characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: multibyte
+
+Data URL
+
+This validates that a string value contains a valid DataURI.
+NOTE: this will also validate that the data portion is valid base64.
+
+ Usage: datauri
+
+Latitude
+
+This validates that a string value contains a valid latitude.
+
+ Usage: latitude
+
+Longitude
+
+This validates that a string value contains a valid longitude.
+
+ Usage: longitude
+
+Social Security Number SSN
+
+This validates that a string value contains a valid U.S. Social Security Number.
+
+ Usage: ssn
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid IP Address.
+
+ Usage: ip
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid v4 IP Address.
+
+ Usage: ipv4
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid v6 IP Address.
+
+ Usage: ipv6
+
+Classless Inter-Domain Routing CIDR
+
+This validates that a string value contains a valid CIDR Address.
+
+ Usage: cidr
+
+Classless Inter-Domain Routing CIDRv4
+
+This validates that a string value contains a valid v4 CIDR Address.
+
+ Usage: cidrv4
+
+Classless Inter-Domain Routing CIDRv6
+
+This validates that a string value contains a valid v6 CIDR Address.
+
+ Usage: cidrv6
+
+Transmission Control Protocol Address TCP
+
+This validates that a string value contains a valid resolvable TCP Address.
+
+ Usage: tcp_addr
+
+Transmission Control Protocol Address TCPv4
+
+This validates that a string value contains a valid resolvable v4 TCP Address.
+
+ Usage: tcp4_addr
+
+Transmission Control Protocol Address TCPv6
+
+This validates that a string value contains a valid resolvable v6 TCP Address.
+
+ Usage: tcp6_addr
+
+User Datagram Protocol Address UDP
+
+This validates that a string value contains a valid resolvable UDP Address.
+
+ Usage: udp_addr
+
+User Datagram Protocol Address UDPv4
+
+This validates that a string value contains a valid resolvable v4 UDP Address.
+
+ Usage: udp4_addr
+
+User Datagram Protocol Address UDPv6
+
+This validates that a string value contains a valid resolvable v6 UDP Address.
+
+ Usage: udp6_addr
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid resolvable IP Address.
+
+ Usage: ip_addr
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid resolvable v4 IP Address.
+
+ Usage: ip4_addr
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid resolvable v6 IP Address.
+
+ Usage: ip6_addr
+
+Unix domain socket end point Address
+
+This validates that a string value contains a valid Unix Address.
+
+ Usage: unix_addr
+
+Media Access Control Address MAC
+
+This validates that a string value contains a valid MAC Address.
+
+ Usage: mac
+
+Note: See Go's ParseMAC for accepted formats and types:
+
+ http://golang.org/src/net/mac.go?s=866:918#L29
+
+Alias Validators and Tags
+
+NOTE: When returning an error, the tag returned in "FieldError" will be
+the alias tag unless the dive tag is part of the alias. Everything after the
+dive tag is not reported as the alias tag. Also, the "ActualTag" in the before
+case will be the actual tag within the alias that failed.
+
+Here is a list of the current built in alias tags:
+
+ "iscolor"
+ alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
+
+Validator notes:
+
+ regex
+ a regex validator won't be added because commas and = signs can be part
+ of a regex which conflict with the validation definitions. Although
+ workarounds can be made, they take away from using pure regexes.
+ Furthermore, it's quick and dirty, but the regexes become harder to
+ maintain and are not reusable, so it's as much a programming philosophy
+ as anything.
+
+ In place of this, new validator functions should be created; a regex can
+ be used within the validator function and even be precompiled for better
+ efficiency within regexes.go.
+
+ And the best reason: you can submit a pull request and we can keep on
+ adding to the validation library of this package!
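+
+A minimal sketch of the suggested approach (the "zipcode" tag and the regex
+are made up for illustration):
+
+ var zipRegex = regexp.MustCompile("^[0-9]{5}$")
+
+ func isZip(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
+     return zipRegex.MatchString(field.String())
+ }
+
+ validate.RegisterValidation("zipcode", isZip)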
+
+Panics
+
+This package panics when bad input is provided; this is by design, as bad
+code like that should not make it to production.
+
+ type Test struct {
+ TestField string `validate:"nonexistentfunction=1"`
+ }
+
+ t := &Test{
+ TestField: "Test"
+ }
+
+ validate.Struct(t) // this will panic
+*/
+package validator
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/logo.png b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/logo.png
new file mode 100644
index 0000000..355000f
Binary files /dev/null and b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/logo.png differ
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/regexes.go b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/regexes.go
new file mode 100644
index 0000000..83ae198
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/regexes.go
@@ -0,0 +1,59 @@
+package validator
+
+import "regexp"
+
+const (
+ alphaRegexString = "^[a-zA-Z]+$"
+ alphaNumericRegexString = "^[a-zA-Z0-9]+$"
+ numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
+ numberRegexString = "^[0-9]+$"
+ hexadecimalRegexString = "^[0-9a-fA-F]+$"
+ hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
+ rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
+ hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
+ hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
+ emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:\\(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22)))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
+ iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
+ uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ aSCIIRegexString = "^[\x00-\x7F]*$"
+ printableASCIIRegexString = "^[\x20-\x7E]*$"
+ multibyteRegexString = "[^\x00-\x7F]"
+ dataURIRegexString = "^data:.+\\/(.+);base64$"
+ latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ sSNRegexString = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
+)
+
+var (
+ alphaRegex = regexp.MustCompile(alphaRegexString)
+ alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
+ numericRegex = regexp.MustCompile(numericRegexString)
+ numberRegex = regexp.MustCompile(numberRegexString)
+ hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
+ hexcolorRegex = regexp.MustCompile(hexcolorRegexString)
+ rgbRegex = regexp.MustCompile(rgbRegexString)
+ rgbaRegex = regexp.MustCompile(rgbaRegexString)
+ hslRegex = regexp.MustCompile(hslRegexString)
+ hslaRegex = regexp.MustCompile(hslaRegexString)
+ emailRegex = regexp.MustCompile(emailRegexString)
+ base64Regex = regexp.MustCompile(base64RegexString)
+ iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
+ iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
+ uUID3Regex = regexp.MustCompile(uUID3RegexString)
+ uUID4Regex = regexp.MustCompile(uUID4RegexString)
+ uUID5Regex = regexp.MustCompile(uUID5RegexString)
+ uUIDRegex = regexp.MustCompile(uUIDRegexString)
+ aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
+ printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
+ multibyteRegex = regexp.MustCompile(multibyteRegexString)
+ dataURIRegex = regexp.MustCompile(dataURIRegexString)
+ latitudeRegex = regexp.MustCompile(latitudeRegexString)
+ longitudeRegex = regexp.MustCompile(longitudeRegexString)
+ sSNRegex = regexp.MustCompile(sSNRegexString)
+)
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/util.go b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/util.go
new file mode 100644
index 0000000..ce01c7c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/util.go
@@ -0,0 +1,382 @@
+package validator
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ dash = "-"
+ blank = ""
+ namespaceSeparator = "."
+ leftBracket = "["
+ rightBracket = "]"
+ restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
+ restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
+ restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
+)
+
+var (
+ restrictedTags = map[string]*struct{}{
+ diveTag: emptyStructPtr,
+ existsTag: emptyStructPtr,
+ structOnlyTag: emptyStructPtr,
+ omitempty: emptyStructPtr,
+ skipValidationTag: emptyStructPtr,
+ utf8HexComma: emptyStructPtr,
+ utf8Pipe: emptyStructPtr,
+ noStructLevelTag: emptyStructPtr,
+ }
+)
+
+// ExtractType gets the actual underlying type of field value.
+// It will dive into pointers and customTypes and return the
+// underlying value and its kind; for example, a non-nil *string is
+// unwrapped to its string value and reflect.String.
+// It is exposed for use within your own Custom Functions.
+func (v *Validate) ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) {
+
+ switch current.Kind() {
+ case reflect.Ptr:
+
+ if current.IsNil() {
+ return current, reflect.Ptr
+ }
+
+ return v.ExtractType(current.Elem())
+
+ case reflect.Interface:
+
+ if current.IsNil() {
+ return current, reflect.Interface
+ }
+
+ return v.ExtractType(current.Elem())
+
+ case reflect.Invalid:
+ return current, reflect.Invalid
+
+ default:
+
+ if v.hasCustomFuncs {
+     if fn, ok := v.customTypeFuncs[current.Type()]; ok {
+         return v.ExtractType(reflect.ValueOf(fn(current)))
+     }
+ }
+
+ return current, current.Kind()
+ }
+}
+
+// GetStructFieldOK traverses a struct to retrieve a specific field denoted by the provided namespace and
+// returns the field, field kind and whether it was successful in retrieving the field at all.
+// NOTE: when not successful ok will be false; this can happen when a nested struct is nil and so the field
+// could not be retrieved because it didn't exist.
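+// For example (hypothetical types): GetStructFieldOK(reflect.ValueOf(user), "Inner.Name")
+// walks from user into its Inner struct field and returns that struct's Name field.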
+func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
+
+ current, kind := v.ExtractType(current)
+
+ if kind == reflect.Invalid {
+ return current, kind, false
+ }
+
+ if namespace == blank {
+ return current, kind, true
+ }
+
+ switch kind {
+
+ case reflect.Ptr, reflect.Interface:
+
+ return current, kind, false
+
+ case reflect.Struct:
+
+ typ := current.Type()
+ fld := namespace
+ ns := namespace
+
+ if typ != timeType && typ != timePtrType {
+
+ idx := strings.Index(namespace, namespaceSeparator)
+
+ if idx != -1 {
+ fld = namespace[:idx]
+ ns = namespace[idx+1:]
+ } else {
+ ns = blank
+ idx = len(namespace)
+ }
+
+ bracketIdx := strings.Index(fld, leftBracket)
+ if bracketIdx != -1 {
+ fld = fld[:bracketIdx]
+
+ ns = namespace[bracketIdx:]
+ }
+
+ current = current.FieldByName(fld)
+
+ return v.GetStructFieldOK(current, ns)
+ }
+
+ case reflect.Array, reflect.Slice:
+ idx := strings.Index(namespace, leftBracket)
+ idx2 := strings.Index(namespace, rightBracket)
+
+ arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
+
+ if arrIdx >= current.Len() {
+ return current, kind, false
+ }
+
+ startIdx := idx2 + 1
+
+ if startIdx < len(namespace) {
+ if namespace[startIdx:startIdx+1] == namespaceSeparator {
+ startIdx++
+ }
+ }
+
+ return v.GetStructFieldOK(current.Index(arrIdx), namespace[startIdx:])
+
+ case reflect.Map:
+ idx := strings.Index(namespace, leftBracket) + 1
+ idx2 := strings.Index(namespace, rightBracket)
+
+ endIdx := idx2
+
+ if endIdx+1 < len(namespace) {
+ if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
+ endIdx++
+ }
+ }
+
+ key := namespace[idx:idx2]
+
+ switch current.Type().Key().Kind() {
+ case reflect.Int:
+ i, _ := strconv.Atoi(key)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
+ case reflect.Int8:
+ i, _ := strconv.ParseInt(key, 10, 8)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int8(i))), namespace[endIdx+1:])
+ case reflect.Int16:
+ i, _ := strconv.ParseInt(key, 10, 16)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int16(i))), namespace[endIdx+1:])
+ case reflect.Int32:
+ i, _ := strconv.ParseInt(key, 10, 32)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int32(i))), namespace[endIdx+1:])
+ case reflect.Int64:
+ i, _ := strconv.ParseInt(key, 10, 64)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
+ case reflect.Uint:
+ i, _ := strconv.ParseUint(key, 10, 0)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint(i))), namespace[endIdx+1:])
+ case reflect.Uint8:
+ i, _ := strconv.ParseUint(key, 10, 8)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint8(i))), namespace[endIdx+1:])
+ case reflect.Uint16:
+ i, _ := strconv.ParseUint(key, 10, 16)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint16(i))), namespace[endIdx+1:])
+ case reflect.Uint32:
+ i, _ := strconv.ParseUint(key, 10, 32)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint32(i))), namespace[endIdx+1:])
+ case reflect.Uint64:
+ i, _ := strconv.ParseUint(key, 10, 64)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
+ case reflect.Float32:
+ f, _ := strconv.ParseFloat(key, 32)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(float32(f))), namespace[endIdx+1:])
+ case reflect.Float64:
+ f, _ := strconv.ParseFloat(key, 64)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(f)), namespace[endIdx+1:])
+ case reflect.Bool:
+ b, _ := strconv.ParseBool(key)
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(b)), namespace[endIdx+1:])
+
+ // reflect.Type = string
+ default:
+ return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(key)), namespace[endIdx+1:])
+ }
+ }
+
+ // if got here there was more namespace, cannot go any deeper
+ panic("Invalid field namespace")
+}
+
+// asInt returns the parameter as an int64
+// or panics if it can't convert
+func asInt(param string) int64 {
+
+ i, err := strconv.ParseInt(param, 0, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asUint returns the parameter as a uint64
+// or panics if it can't convert
+func asUint(param string) uint64 {
+
+ i, err := strconv.ParseUint(param, 0, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asFloat returns the parameter as a float64
+// or panics if it can't convert
+func asFloat(param string) float64 {
+
+ i, err := strconv.ParseFloat(param, 64)
+ panicIf(err)
+
+ return i
+}
+
+func panicIf(err error) {
+ if err != nil {
+ panic(err.Error())
+ }
+}
+
+func (v *Validate) parseStruct(current reflect.Value, sName string) *cachedStruct {
+
+ typ := current.Type()
+ s := &cachedStruct{Name: sName, fields: map[int]cachedField{}}
+
+ numFields := current.NumField()
+
+ var fld reflect.StructField
+ var tag string
+ var customName string
+
+ for i := 0; i < numFields; i++ {
+
+ fld = typ.Field(i)
+
+ if fld.PkgPath != blank {
+ continue
+ }
+
+ tag = fld.Tag.Get(v.tagName)
+
+ if tag == skipValidationTag {
+ continue
+ }
+
+ customName = fld.Name
+ if v.fieldNameTag != blank {
+
+ name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0]
+
+ // dash check is for json "-" (aka skipValidationTag), which means don't output in json
+ if name != "" && name != skipValidationTag {
+ customName = name
+ }
+ }
+
+ cTag, ok := v.tagCache.Get(tag)
+ if !ok {
+ cTag = v.parseTags(tag, fld.Name)
+ }
+
+ s.fields[i] = cachedField{Idx: i, Name: fld.Name, AltName: customName, CachedTag: cTag}
+ }
+
+ v.structCache.Set(typ, s)
+
+ return s
+}
+
+func (v *Validate) parseTags(tag, fieldName string) *cachedTag {
+
+ cTag := &cachedTag{tag: tag}
+
+ v.parseTagsRecursive(cTag, tag, fieldName, blank, false)
+
+ v.tagCache.Set(tag, cTag)
+
+ return cTag
+}
+
+func (v *Validate) parseTagsRecursive(cTag *cachedTag, tag, fieldName, alias string, isAlias bool) bool {
+
+ if tag == blank {
+ return true
+ }
+
+ for _, t := range strings.Split(tag, tagSeparator) {
+
+ if v.hasAliasValidators {
+ // check map for alias and process new tags, otherwise process as usual
+ if tagsVal, ok := v.aliasValidators[t]; ok {
+
+ leave := v.parseTagsRecursive(cTag, tagsVal, fieldName, t, true)
+
+ if leave {
+ return leave
+ }
+
+ continue
+ }
+ }
+
+ switch t {
+
+ case diveTag:
+ cTag.diveTag = tag
+ tVals := &tagVals{tagVals: [][]string{{t}}}
+ cTag.tags = append(cTag.tags, tVals)
+ return true
+
+ case omitempty:
+ cTag.isOmitEmpty = true
+
+ case structOnlyTag:
+ cTag.isStructOnly = true
+
+ case noStructLevelTag:
+ cTag.isNoStructLevel = true
+ }
+
+ // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
+ orVals := strings.Split(t, orSeparator)
+ tagVal := &tagVals{isAlias: isAlias, isOrVal: len(orVals) > 1, tagVals: make([][]string, len(orVals))}
+ cTag.tags = append(cTag.tags, tagVal)
+
+ var key string
+ var param string
+
+ for i, val := range orVals {
+ vals := strings.SplitN(val, tagKeySeparator, 2)
+ key = vals[0]
+
+ tagVal.tag = key
+
+ if isAlias {
+ tagVal.tag = alias
+ }
+
+ if key == blank {
+ panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
+ }
+
+ if len(vals) > 1 {
+ param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
+ }
+
+ tagVal.tagVals[i] = []string{key, param}
+ }
+ }
+
+ return false
+}
diff --git a/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/validator.go b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/validator.go
new file mode 100644
index 0000000..0e6db7e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/go-playground/validator.v8/validator.go
@@ -0,0 +1,797 @@
+/**
+ * Package validator
+ *
+ * MISC:
+ * - anonymous structs - they don't have names so expect the Struct name within StructErrors to be blank
+ *
+ */
+
+package validator
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ utf8HexComma = "0x2C"
+ utf8Pipe = "0x7C"
+ tagSeparator = ","
+ orSeparator = "|"
+ tagKeySeparator = "="
+ structOnlyTag = "structonly"
+ noStructLevelTag = "nostructlevel"
+ omitempty = "omitempty"
+ skipValidationTag = "-"
+ diveTag = "dive"
+ existsTag = "exists"
+ fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
+ arrayIndexFieldName = "%s" + leftBracket + "%d" + rightBracket
+ mapIndexFieldName = "%s" + leftBracket + "%v" + rightBracket
+ invalidValidation = "Invalid validation tag on field %s"
+ undefinedValidation = "Undefined validation function on field %s"
+ validatorNotInitialized = "Validator instance not initialized"
+ fieldNameRequired = "Field Name Required"
+ tagRequired = "Tag Required"
+)
+
+var (
+ timeType = reflect.TypeOf(time.Time{})
+ timePtrType = reflect.TypeOf(&time.Time{})
+ emptyStructPtr = new(struct{})
+)
+
+// StructLevel contains all of the information and helper methods
+// for reporting errors during struct level validation
+type StructLevel struct {
+ TopStruct reflect.Value
+ CurrentStruct reflect.Value
+ errPrefix string
+ nsPrefix string
+ errs ValidationErrors
+ v *Validate
+}
+
+// ReportValidationErrors accepts the key relative to the top level struct and validation errors.
+// Example: had a triple nested struct User, ContactInfo, Country and ran errs := validate.Struct(country)
+// from within a User struct level validation would call this method like so:
+// ReportValidationErrors("ContactInfo.", errs)
+// NOTE: relativeKey can contain both the Field Relative and Custom name relative paths
+// i.e. ReportValidationErrors("ContactInfo.|cInfo", errs) where cInfo represents say the JSON name of
+// the relative path; this will be split into 2 variables in the next validator version.
+func (sl *StructLevel) ReportValidationErrors(relativeKey string, errs ValidationErrors) {
+ for _, e := range errs {
+
+ idx := strings.Index(relativeKey, "|")
+ var rel string
+ var cRel string
+
+ if idx != -1 {
+ rel = relativeKey[:idx]
+ cRel = relativeKey[idx+1:]
+ } else {
+ rel = relativeKey
+ }
+
+ key := sl.errPrefix + rel + e.Field
+
+ e.FieldNamespace = key
+ e.NameNamespace = sl.nsPrefix + cRel + e.Name
+
+ sl.errs[key] = e
+ }
+}
+
+// ReportError reports an error just by passing the field and tag information
+// NOTE: tag can be an existing validation tag or just something you make up
+// and process on the flip side; it's up to you.
+func (sl *StructLevel) ReportError(field reflect.Value, fieldName string, customName string, tag string) {
+
+ field, kind := sl.v.ExtractType(field)
+
+ if fieldName == blank {
+ panic(fieldNameRequired)
+ }
+
+ if customName == blank {
+ customName = fieldName
+ }
+
+ if tag == blank {
+ panic(tagRequired)
+ }
+
+ ns := sl.errPrefix + fieldName
+
+ switch kind {
+ case reflect.Invalid:
+ sl.errs[ns] = &FieldError{
+ FieldNamespace: ns,
+ NameNamespace: sl.nsPrefix + customName,
+ Name: customName,
+ Field: fieldName,
+ Tag: tag,
+ ActualTag: tag,
+ Param: blank,
+ Kind: kind,
+ }
+ default:
+ sl.errs[ns] = &FieldError{
+ FieldNamespace: ns,
+ NameNamespace: sl.nsPrefix + customName,
+ Name: customName,
+ Field: fieldName,
+ Tag: tag,
+ ActualTag: tag,
+ Param: blank,
+ Value: field.Interface(),
+ Kind: kind,
+ Type: field.Type(),
+ }
+ }
+}
+
+// Validate contains the validator settings passed in using the Config struct
+type Validate struct {
+ tagName string
+ fieldNameTag string
+ validationFuncs map[string]Func
+ structLevelFuncs map[reflect.Type]StructLevelFunc
+ customTypeFuncs map[reflect.Type]CustomTypeFunc
+ aliasValidators map[string]string
+ hasCustomFuncs bool
+ hasAliasValidators bool
+ hasStructLevelFuncs bool
+ tagCache *tagCacheMap
+ structCache *structCacheMap
+ errsPool *sync.Pool
+}
+
+func (v *Validate) initCheck() {
+ if v == nil {
+ panic(validatorNotInitialized)
+ }
+}
+
+// Config contains the options that a Validator instance will use.
+// It is passed to the New() function
+type Config struct {
+ TagName string
+ FieldNameTag string
+}
+
+// CustomTypeFunc allows for overriding or adding custom field type handler functions
+// field = field value of the type to return a value to be validated
+// example Valuer from sql driver, see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
+type CustomTypeFunc func(field reflect.Value) interface{}
+
+// Func accepts all values needed for field and cross field validation
+// v = validator instance, needed by some built in functions for its custom types
+// topStruct = top level struct when validating by struct otherwise nil
+// currentStruct = current level struct when validating by struct otherwise optional comparison value
+// field = field value for validation
+// param = parameter used in validation i.e. gt=0 param would be 0
+type Func func(v *Validate, topStruct reflect.Value, currentStruct reflect.Value, field reflect.Value, fieldtype reflect.Type, fieldKind reflect.Kind, param string) bool
+
+// StructLevelFunc accepts all values needed for struct level validation
+type StructLevelFunc func(v *Validate, structLevel *StructLevel)
+
+// ValidationErrors is a type of map[string]*FieldError;
+// it exists to allow for multiple errors to be passed from this library
+// and yet still subscribe to the error interface
+type ValidationErrors map[string]*FieldError
+
+// Error is intended for use in development + debugging and not intended to be a production error message.
+// It allows ValidationErrors to subscribe to the Error interface.
+// All information to create an error message specific to your application is contained within
+// the FieldError found within the ValidationErrors map
+func (ve ValidationErrors) Error() string {
+
+ buff := bytes.NewBufferString(blank)
+
+ for key, err := range ve {
+ buff.WriteString(fmt.Sprintf(fieldErrMsg, key, err.Field, err.Tag))
+ buff.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buff.String())
+}
+
+// FieldError contains a single field's validation error along
+// with other properties that may be needed for error message creation
+type FieldError struct {
+ FieldNamespace string
+ NameNamespace string
+ Field string
+ Name string
+ Tag string
+ ActualTag string
+ Kind reflect.Kind
+ Type reflect.Type
+ Param string
+ Value interface{}
+}
+
+// New creates a new Validate instance for use.
+func New(config *Config) *Validate {
+
+ v := &Validate{
+ tagName: config.TagName,
+ fieldNameTag: config.FieldNameTag,
+ tagCache: &tagCacheMap{m: map[string]*cachedTag{}},
+ structCache: &structCacheMap{m: map[reflect.Type]*cachedStruct{}},
+ errsPool: &sync.Pool{New: func() interface{} {
+ return ValidationErrors{}
+ }}}
+
+ if len(v.aliasValidators) == 0 {
+ // must copy alias validators for separate validations to be used in each validator instance
+ v.aliasValidators = map[string]string{}
+ for k, val := range bakedInAliasValidators {
+ v.RegisterAliasValidation(k, val)
+ }
+ }
+
+ if len(v.validationFuncs) == 0 {
+ // must copy validators for separate validations to be used in each instance
+ v.validationFuncs = map[string]Func{}
+ for k, val := range bakedInValidators {
+ v.RegisterValidation(k, val)
+ }
+ }
+
+ return v
+}
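+
+// A minimal sketch of creating an instance, assuming the common "validate"
+// struct tag name:
+//
+//	config := &validator.Config{TagName: "validate"}
+//	validate := validator.New(config)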
+
+// RegisterStructValidation registers a StructLevelFunc against a number of types
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
+ v.initCheck()
+
+ if v.structLevelFuncs == nil {
+ v.structLevelFuncs = map[reflect.Type]StructLevelFunc{}
+ }
+
+ for _, t := range types {
+ v.structLevelFuncs[reflect.TypeOf(t)] = fn
+ }
+
+ v.hasStructLevelFuncs = true
+}
+
+// RegisterValidation adds a validation Func to a Validate's map of validators denoted by the key
+// NOTE: if the key already exists, the previous validation function will be replaced.
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterValidation(key string, fn Func) error {
+ v.initCheck()
+
+ if key == blank {
+ return errors.New("Function Key cannot be empty")
+ }
+
+ if fn == nil {
+ return errors.New("Function cannot be empty")
+ }
+
+ _, ok := restrictedTags[key]
+
+ if ok || strings.ContainsAny(key, restrictedTagChars) {
+ panic(fmt.Sprintf(restrictedTagErr, key))
+ }
+
+ v.validationFuncs[key] = fn
+
+ return nil
+}
+
+// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
+ v.initCheck()
+
+ if v.customTypeFuncs == nil {
+ v.customTypeFuncs = map[reflect.Type]CustomTypeFunc{}
+ }
+
+ for _, t := range types {
+ v.customTypeFuncs[reflect.TypeOf(t)] = fn
+ }
+
+ v.hasCustomFuncs = true
+}
+
+// RegisterAliasValidation registers a mapping of a single validation tag that
+// defines a common or complex set of validation(s) to simplify adding validation
+// to structs. NOTE: when returning an error, the tag returned in FieldError will be
+// the alias tag unless the dive tag is part of the alias; everything after the
+// dive tag is not reported as the alias tag. Also, the ActualTag in the former case
+// will be the actual tag within the alias that failed.
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterAliasValidation(alias, tags string) {
+ v.initCheck()
+
+ _, ok := restrictedTags[alias]
+
+ if ok || strings.ContainsAny(alias, restrictedTagChars) {
+ panic(fmt.Sprintf(restrictedAliasErr, alias))
+ }
+
+ v.aliasValidators[alias] = tags
+ v.hasAliasValidators = true
+}
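+
+// Example (illustrative; "iscolor" is one of the baked-in aliases):
+//
+//	validate.RegisterAliasValidation("iscolor", "hexcolor|rgb|rgba|hsl|hsla")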
+
+// Field validates a single field using tag style validation and returns nil or ValidationErrors as type error.
+// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
+// NOTE: it returns ValidationErrors instead of a single FieldError because this can also
+// validate Array, Slice and Map fields, which may contain more than one error
+func (v *Validate) Field(field interface{}, tag string) error {
+ v.initCheck()
+
+ errs := v.errsPool.Get().(ValidationErrors)
+ fieldVal := reflect.ValueOf(field)
+
+ v.traverseField(fieldVal, fieldVal, fieldVal, blank, blank, errs, false, tag, blank, blank, false, false, nil, nil)
+
+ if len(errs) == 0 {
+ v.errsPool.Put(errs)
+ return nil
+ }
+
+ return errs
+}
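+
+// Example (an illustrative sketch):
+//
+//	errs := validate.Field("joe@example.com", "required,email") // nil on success
+//	errs = validate.Field("", "required")                       // ValidationErrors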
+
+// FieldWithValue validates a single field against another field's value using tag style validation and returns nil or ValidationErrors.
+// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
+// NOTE: it returns ValidationErrors instead of a single FieldError because this can also
+// validate Array, Slice and Map fields, which may contain more than one error
+func (v *Validate) FieldWithValue(val interface{}, field interface{}, tag string) error {
+ v.initCheck()
+
+ errs := v.errsPool.Get().(ValidationErrors)
+ topVal := reflect.ValueOf(val)
+
+ v.traverseField(topVal, topVal, reflect.ValueOf(field), blank, blank, errs, false, tag, blank, blank, false, false, nil, nil)
+
+ if len(errs) == 0 {
+ v.errsPool.Put(errs)
+ return nil
+ }
+
+ return errs
+}
+
+// StructPartial validates only the fields passed in, ignoring all others.
+// Fields may be provided in a namespaced fashion relative to the struct provided,
+// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name. It returns nil or ValidationErrors as error.
+// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
+func (v *Validate) StructPartial(current interface{}, fields ...string) error {
+ v.initCheck()
+
+ sv, _ := v.ExtractType(reflect.ValueOf(current))
+ name := sv.Type().Name()
+ m := map[string]*struct{}{}
+
+ if fields != nil {
+ for _, k := range fields {
+
+ flds := strings.Split(k, namespaceSeparator)
+ if len(flds) > 0 {
+
+ key := name + namespaceSeparator
+ for _, s := range flds {
+
+ idx := strings.Index(s, leftBracket)
+
+ if idx != -1 {
+ for idx != -1 {
+ key += s[:idx]
+ m[key] = emptyStructPtr
+
+ idx2 := strings.Index(s, rightBracket)
+ idx2++
+ key += s[idx:idx2]
+ m[key] = emptyStructPtr
+ s = s[idx2:]
+ idx = strings.Index(s, leftBracket)
+ }
+ } else {
+
+ key += s
+ m[key] = emptyStructPtr
+ }
+
+ key += namespaceSeparator
+ }
+ }
+ }
+ }
+
+ errs := v.errsPool.Get().(ValidationErrors)
+
+ v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, false, m, false)
+
+ if len(errs) == 0 {
+ v.errsPool.Put(errs)
+ return nil
+ }
+
+ return errs
+}
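+
+// Example (an illustrative sketch; the user value and field names are hypothetical):
+//
+//	errs := validate.StructPartial(user, "Name", "ContactInfo.Phone")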
+
+// StructExcept validates all fields except the ones passed in.
+// Fields may be provided in a namespaced fashion relative to the struct provided,
+// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name. It returns nil or ValidationErrors as error.
+// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
+func (v *Validate) StructExcept(current interface{}, fields ...string) error {
+ v.initCheck()
+
+ sv, _ := v.ExtractType(reflect.ValueOf(current))
+ name := sv.Type().Name()
+ m := map[string]*struct{}{}
+
+ for _, key := range fields {
+ m[name+namespaceSeparator+key] = emptyStructPtr
+ }
+
+ errs := v.errsPool.Get().(ValidationErrors)
+
+ v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, true, m, false)
+
+ if len(errs) == 0 {
+ v.errsPool.Put(errs)
+ return nil
+ }
+
+ return errs
+}
+
+// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
+// It returns nil or ValidationErrors as error.
+// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
+func (v *Validate) Struct(current interface{}) error {
+ v.initCheck()
+
+ errs := v.errsPool.Get().(ValidationErrors)
+ sv := reflect.ValueOf(current)
+
+ v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, false, false, nil, false)
+
+ if len(errs) == 0 {
+ v.errsPool.Put(errs)
+ return nil
+ }
+
+ return errs
+}
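+
+// Example (an illustrative sketch):
+//
+//	type User struct {
+//		Name  string `validate:"required"`
+//		Email string `validate:"required,email"`
+//	}
+//
+//	errs := validate.Struct(User{Name: "Joe"})
+//	if errs != nil {
+//		fieldErrs := errs.(validator.ValidationErrors)
+//		_ = fieldErrs["User.Email"] // describes the failing field
+//	}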
+
+// tranverseStruct traverses a struct's fields and then passes them to be validated by traverseField
+func (v *Validate) tranverseStruct(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, useStructName bool, partial bool, exclude bool, includeExclude map[string]*struct{}, isStructOnly bool) {
+
+ if current.Kind() == reflect.Ptr && !current.IsNil() {
+ current = current.Elem()
+ }
+
+ if current.Kind() != reflect.Struct && current.Kind() != reflect.Interface {
+ panic("value passed for validation is not a struct")
+ }
+
+ typ := current.Type()
+
+ sName := typ.Name()
+
+ if useStructName {
+ errPrefix += sName + namespaceSeparator
+
+ if v.fieldNameTag != blank {
+ nsPrefix += sName + namespaceSeparator
+ }
+ }
+
+ // structonly tag present; don't traverse fields,
+ // but must still check for and run struct level validation
+ // below, if present
+ if !isStructOnly {
+
+ var fld reflect.StructField
+
+ // is anonymous struct, cannot parse or cache as
+ // it has no name to index by
+ if sName == blank {
+
+ var customName string
+ var ok bool
+ numFields := current.NumField()
+
+ for i := 0; i < numFields; i++ {
+
+ fld = typ.Field(i)
+
+ if fld.PkgPath != blank && !fld.Anonymous {
+ continue
+ }
+
+ if partial {
+
+ _, ok = includeExclude[errPrefix+fld.Name]
+
+ if (ok && exclude) || (!ok && !exclude) {
+ continue
+ }
+ }
+
+ customName = fld.Name
+
+ if v.fieldNameTag != blank {
+
+ name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0]
+
+ // dash check is for JSON; "-" means don't output the field in JSON
+ if name != blank && name != dash {
+ customName = name
+ }
+ }
+
+ v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, fld.Tag.Get(v.tagName), fld.Name, customName, partial, exclude, includeExclude, nil)
+ }
+ } else {
+ s, ok := v.structCache.Get(typ)
+ if !ok {
+ s = v.parseStruct(current, sName)
+ }
+
+ for i, f := range s.fields {
+
+ if partial {
+
+ _, ok = includeExclude[errPrefix+f.Name]
+
+ if (ok && exclude) || (!ok && !exclude) {
+ continue
+ }
+ }
+ fld = typ.Field(i)
+
+ v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, f.CachedTag.tag, fld.Name, f.AltName, partial, exclude, includeExclude, f.CachedTag)
+ }
+ }
+ }
+
+ // check for any struct-level validations after all field validations have been checked.
+ if v.hasStructLevelFuncs {
+ if fn, ok := v.structLevelFuncs[current.Type()]; ok {
+ fn(v, &StructLevel{v: v, TopStruct: topStruct, CurrentStruct: current, errPrefix: errPrefix, nsPrefix: nsPrefix, errs: errs})
+ }
+ }
+}
+
+// traverseField validates any field, be it a struct or single field, ensures its validity and passes it along to be validated via its tag options
+func (v *Validate) traverseField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, isStructField bool, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
+
+ if tag == skipValidationTag {
+ return
+ }
+
+ if cTag == nil {
+ var isCached bool
+ cTag, isCached = v.tagCache.Get(tag)
+
+ if !isCached {
+ cTag = v.parseTags(tag, name)
+ }
+ }
+
+ current, kind := v.ExtractType(current)
+ var typ reflect.Type
+
+ switch kind {
+ case reflect.Ptr, reflect.Interface, reflect.Invalid:
+ if cTag.isOmitEmpty {
+ return
+ }
+
+ if tag != blank {
+
+ ns := errPrefix + name
+
+ if kind == reflect.Invalid {
+ errs[ns] = &FieldError{
+ FieldNamespace: ns,
+ NameNamespace: nsPrefix + customName,
+ Name: customName,
+ Field: name,
+ Tag: cTag.tags[0].tag,
+ ActualTag: cTag.tags[0].tagVals[0][0],
+ Param: cTag.tags[0].tagVals[0][1],
+ Kind: kind,
+ }
+ return
+ }
+
+ errs[ns] = &FieldError{
+ FieldNamespace: ns,
+ NameNamespace: nsPrefix + customName,
+ Name: customName,
+ Field: name,
+ Tag: cTag.tags[0].tag,
+ ActualTag: cTag.tags[0].tagVals[0][0],
+ Param: cTag.tags[0].tagVals[0][1],
+ Value: current.Interface(),
+ Kind: kind,
+ Type: current.Type(),
+ }
+
+ return
+ }
+
+ // if we get here tag length is zero and we can leave
+ if kind == reflect.Invalid {
+ return
+ }
+
+ case reflect.Struct:
+ typ = current.Type()
+
+ if typ != timeType {
+
+ if cTag.isNoStructLevel {
+ return
+ }
+
+ v.tranverseStruct(topStruct, current, current, errPrefix+name+namespaceSeparator, nsPrefix+customName+namespaceSeparator, errs, false, partial, exclude, includeExclude, cTag.isStructOnly)
+ return
+ }
+ }
+
+ if tag == blank {
+ return
+ }
+
+ typ = current.Type()
+
+ var dive bool
+ var diveSubTag string
+
+ for _, valTag := range cTag.tags {
+
+ if valTag.tagVals[0][0] == existsTag {
+ continue
+ }
+
+ if valTag.tagVals[0][0] == diveTag {
+ dive = true
+ diveSubTag = strings.TrimLeft(strings.SplitN(cTag.diveTag, diveTag, 2)[1], ",")
+ break
+ }
+
+ if valTag.tagVals[0][0] == omitempty {
+
+ if !HasValue(v, topStruct, currentStruct, current, typ, kind, blank) {
+ return
+ }
+ continue
+ }
+
+ if v.validateField(topStruct, currentStruct, current, typ, kind, errPrefix, nsPrefix, errs, valTag, name, customName) {
+ return
+ }
+ }
+
+ if dive {
+ // traverse slice or map here
+ // or panic ;)
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ v.traverseSlice(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil)
+ case reflect.Map:
+ v.traverseMap(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil)
+ default:
+ // throw error, if not a slice or map then should not have gotten here
+ // bad dive tag
+ panic("dive error! can't dive on a non slice or map")
+ }
+ }
+}
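+
+// An illustrative sketch of the dive tag handled above: validations to the
+// right of dive apply to each element of a slice or map rather than to the
+// collection itself, e.g.
+//
+//	Tags []string `validate:"required,dive,max=10"` // each tag at most 10 chars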
+
+// traverseSlice traverses a Slice or Array's elements and passes them to traverseField for validation
+func (v *Validate) traverseSlice(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
+
+ for i := 0; i < current.Len(); i++ {
+ v.traverseField(topStruct, currentStruct, current.Index(i), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(arrayIndexFieldName, name, i), fmt.Sprintf(arrayIndexFieldName, customName, i), partial, exclude, includeExclude, cTag)
+ }
+}
+
+// traverseMap traverses a map's elements and passes them to traverseField for validation
+func (v *Validate) traverseMap(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
+
+ for _, key := range current.MapKeys() {
+ v.traverseField(topStruct, currentStruct, current.MapIndex(key), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(mapIndexFieldName, name, key.Interface()), fmt.Sprintf(mapIndexFieldName, customName, key.Interface()), partial, exclude, includeExclude, cTag)
+ }
+}
+
+// validateField validates a field based on the provided tag's key and param values and returns true if there is an error or false if all ok
+func (v *Validate) validateField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, currentType reflect.Type, currentKind reflect.Kind, errPrefix string, nsPrefix string, errs ValidationErrors, valTag *tagVals, name, customName string) bool {
+
+ var valFunc Func
+ var ok bool
+
+ if valTag.isOrVal {
+
+ errTag := blank
+
+ for _, val := range valTag.tagVals {
+
+ valFunc, ok = v.validationFuncs[val[0]]
+ if !ok {
+ panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name)))
+ }
+
+ if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, val[1]) {
+ return false
+ }
+
+ errTag += orSeparator + val[0]
+ }
+
+ ns := errPrefix + name
+
+ if valTag.isAlias {
+ errs[ns] = &FieldError{
+ FieldNamespace: ns,
+ NameNamespace: nsPrefix + customName,
+ Name: customName,
+ Field: name,
+ Tag: valTag.tag,
+ ActualTag: errTag[1:],
+ Value: current.Interface(),
+ Type: currentType,
+ Kind: currentKind,
+ }
+ } else {
+ errs[errPrefix+name] = &FieldError{
+ FieldNamespace: ns,
+ NameNamespace: nsPrefix + customName,
+ Name: customName,
+ Field: name,
+ Tag: errTag[1:],
+ ActualTag: errTag[1:],
+ Value: current.Interface(),
+ Type: currentType,
+ Kind: currentKind,
+ }
+ }
+
+ return true
+ }
+
+ valFunc, ok = v.validationFuncs[valTag.tagVals[0][0]]
+ if !ok {
+ panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name)))
+ }
+
+ if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, valTag.tagVals[0][1]) {
+ return false
+ }
+
+ ns := errPrefix + name
+
+ errs[ns] = &FieldError{
+ FieldNamespace: ns,
+ NameNamespace: nsPrefix + customName,
+ Name: customName,
+ Field: name,
+ Tag: valTag.tag,
+ ActualTag: valTag.tagVals[0][0],
+ Value: current.Interface(),
+ Param: valTag.tagVals[0][1],
+ Type: currentType,
+ Kind: currentKind,
+ }
+
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.gitignore b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.gitignore
new file mode 100644
index 0000000..3bf973e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.gitignore
@@ -0,0 +1,29 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/generator
+/cluster-test/cluster-test
+/cluster-test/*.log
+/cluster-test/es-chaos-monkey
+/spec
+/tmp
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.travis.yml b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.travis.yml
new file mode 100644
index 0000000..8819945
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/.travis.yml
@@ -0,0 +1,19 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.5
+ - tip
+
+env:
+ matrix:
+ - ES_VERSION=1.6.2
+ - ES_VERSION=1.7.2
+
+before_script:
+ - mkdir ${HOME}/elasticsearch
+ - wget http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz
+ - tar -xzf elasticsearch-${ES_VERSION}.tar.gz -C ${HOME}/elasticsearch
+ - ${HOME}/elasticsearch/elasticsearch-${ES_VERSION}/bin/elasticsearch >& /dev/null &
+ - sleep 15
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTING.md
new file mode 100644
index 0000000..bb61408
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# How to contribute
+
+Elastic is an open-source project and we look forward to every
+contribution.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* Work on the latest possible state of `olivere/elastic`.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+ Elasticsearch. At the moment, we're targeting the current and the previous
+ release, e.g. the 1.4 and the 1.3 branch.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+ probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTORS b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTORS
new file mode 100644
index 0000000..268b4ac
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/CONTRIBUTORS
@@ -0,0 +1,23 @@
+# This is a list of people who have contributed code
+# to the Elastic repository.
+#
+# It is just my small "thank you" to all those that helped
+# making Elastic what it is.
+#
+# Please keep this list sorted.
+
+Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
+Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
+Corey Scott [@corsc](https://github.com/corsc)
+Gerhard Häring [@ghaering](https://github.com/ghaering)
+Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
+Jack Lindamood [@cep21](https://github.com/cep21)
+Junpei Tsuji [@jun06t](https://github.com/jun06t)
+Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
+Mara Kim [@autochthe](https://github.com/autochthe)
+Medhi Bechina [@mdzor](https://github.com/mdzor)
+Nicholas Wolff [@nwolff](https://github.com/nwolff)
+Orne Brocaar [@brocaar](https://github.com/brocaar)
+Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
+Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
+zakthomas [@zakthomas](https://github.com/zakthomas)
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/LICENSE
new file mode 100644
index 0000000..8b22cdb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright © 2012-2015 Oliver Eilhard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the “Software”), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/README.md b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/README.md
new file mode 100644
index 0000000..2b7579a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/README.md
@@ -0,0 +1,419 @@
+# Elastic
+
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
+[Go](http://www.golang.org/) programming language.
+
+[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=master)](https://travis-ci.org/olivere/elastic)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/olivere/elastic)
+[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
+
+
+## Releases
+
+**Notice that the master branch always refers to the latest version of Elastic. If you want to use stable versions of Elastic, you should use the packages released via [gopkg.in](https://gopkg.in).**
+
+Here's the version matrix:
+
+Elasticsearch version | Elastic version  | Package URL
+----------------------|------------------|------------
+2.x | 3.0 **beta** | [`gopkg.in/olivere/elastic.v3-unstable`](https://gopkg.in/olivere/elastic.v3-unstable) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3-unstable))
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+
+**Example:**
+
+You have Elasticsearch 1.6.0 installed and want to use Elastic. As listed above, you should use Elastic 2.0. So you first install Elastic 2.0.
+
+```sh
+$ go get gopkg.in/olivere/elastic.v2
+```
+
+Then you use it via the following import path:
+
+```go
+import "gopkg.in/olivere/elastic.v2"
+```
+
+### Elastic 3.0
+
+Elastic 3.0 targets Elasticsearch 2.x and is currently under [active development](https://github.com/olivere/elastic/tree/release-branch.v3). It is not published to gopkg.in yet.
+
+There are a lot of [breaking changes in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-2.0.html) and we will use this as an opportunity to [clean up and refactor Elastic as well](https://github.com/olivere/elastic/blob/release-branch.v3/CHANGELOG-3.0.md).
+
+### Elastic 2.0
+
+Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
+
+### Elastic 1.0
+
+Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
+to a recent version.
+
+However, if you cannot update for some reason, don't worry. Version 1.0 is
+still available. All you need to do is go-get it and change your import path
+as described above.
+
+
+## Status
+
+We have been using Elastic in production since 2012. Although Elastic is quite
+stable in our experience, it does not have a stable API yet. The reason for this
+is that Elasticsearch changes quite often and at a fast pace.
+At this moment we focus on features, not on a stable API.
+
+Having said that, there have been no big API changes that required you
+to rewrite large parts of your application.
+More often than not it's renaming APIs and adding/removing features
+so that we stay in sync with the Elasticsearch API.
+
+Elastic has been used in production with the following Elasticsearch versions:
+0.90, 1.0, 1.1, 1.2, 1.3, 1.4, and 1.5.
+Furthermore, we use [Travis CI](https://travis-ci.org/)
+to test Elastic with the most recent versions of Elasticsearch and Go.
+See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
+file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
+for the results.
+
+Elasticsearch has quite a few features. A lot of them are
+not yet implemented in Elastic (see below for details).
+I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Usage
+
+The first thing you need to do is create a Client. The client connects to
+Elasticsearch at http://127.0.0.1:9200 by default.
+
+You typically create one client for your app. Here's a complete example.
+
+```go
+// Create a client
+client, err := elastic.NewClient()
+if err != nil {
+ // Handle error
+}
+
+// Create an index
+_, err = client.CreateIndex("twitter").Do()
+if err != nil {
+ // Handle error
+ panic(err)
+}
+
+// Add a document to the index
+tweet := Tweet{User: "olivere", Message: "Take Five"}
+_, err = client.Index().
+ Index("twitter").
+ Type("tweet").
+ Id("1").
+ BodyJson(tweet).
+ Do()
+if err != nil {
+ // Handle error
+ panic(err)
+}
+
+// Search with a term query
+termQuery := elastic.NewTermQuery("user", "olivere")
+searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(&termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+if err != nil {
+ // Handle error
+ panic(err)
+}
+
+// searchResult is of type SearchResult and returns hits, suggestions,
+// and all kinds of other information from Elasticsearch.
+fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+// Each is a convenience function that iterates over hits in a search result.
+// It makes sure you don't need to check for nil values in the response.
+// However, it ignores errors in serialization. If you want full control
+// over iterating the hits, see below.
+var ttyp Tweet
+for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ if t, ok := item.(Tweet); ok {
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+}
+// TotalHits is another convenience function that works even when something goes wrong.
+fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+// Here's how you iterate through results with full control over each step.
+if searchResult.Hits != nil {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ }
+
+ // Work with tweet
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ }
+} else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+}
+
+// Delete the index again
+_, err = client.DeleteIndex("twitter").Do()
+if err != nil {
+ // Handle error
+ panic(err)
+}
+```
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
+
+
+## API Status
+
+Here's the current API status.
+
+### APIs
+
+- [x] Search (most queries, filters, facets, aggregations etc. are implemented: see below)
+- [x] Index
+- [x] Get
+- [x] Delete
+- [x] Delete By Query
+- [x] Update
+- [x] Multi Get
+- [x] Bulk
+- [ ] Bulk UDP
+- [ ] Term vectors
+- [ ] Multi term vectors
+- [x] Count
+- [ ] Validate
+- [x] Explain
+- [x] Search
+- [ ] Search shards
+- [x] Search template
+- [x] Facets (most are implemented, see below)
+- [x] Aggregates (most are implemented, see below)
+- [x] Multi Search
+- [x] Percolate
+- [ ] More like this
+- [ ] Benchmark
+
+### Indices
+
+- [x] Create index
+- [x] Delete index
+- [x] Get index
+- [x] Indices exists
+- [x] Open/close index
+- [x] Put mapping
+- [x] Get mapping
+- [ ] Get field mapping
+- [x] Types exist
+- [x] Delete mapping
+- [x] Index aliases
+- [ ] Update indices settings
+- [x] Get settings
+- [ ] Analyze
+- [x] Index templates
+- [ ] Warmers
+- [ ] Status
+- [x] Indices stats
+- [ ] Indices segments
+- [ ] Indices recovery
+- [ ] Clear cache
+- [x] Flush
+- [x] Refresh
+- [x] Optimize
+- [ ] Upgrade
+
+### Snapshot and Restore
+
+- [ ] Snapshot
+- [ ] Restore
+- [ ] Snapshot status
+- [ ] Monitoring snapshot/restore progress
+- [ ] Partial restore
+
+### Cat APIs
+
+Not implemented. Those are better suited for operating with Elasticsearch
+on the command line.
+
+### Cluster
+
+- [x] Health
+- [x] State
+- [x] Stats
+- [ ] Pending cluster tasks
+- [ ] Cluster reroute
+- [ ] Cluster update settings
+- [ ] Nodes stats
+- [x] Nodes info
+- [ ] Nodes hot_threads
+- [ ] Nodes shutdown
+
+### Search
+
+- [x] Inner hits (for ES >= 1.5.0; see [docs](https://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html))
+
+### Query DSL
+
+#### Queries
+
+- [x] `match`
+- [x] `multi_match`
+- [x] `bool`
+- [x] `boosting`
+- [ ] `common_terms`
+- [ ] `constant_score`
+- [x] `dis_max`
+- [x] `filtered`
+- [x] `fuzzy_like_this_query` (`flt`)
+- [x] `fuzzy_like_this_field_query` (`flt_field`)
+- [x] `function_score`
+- [x] `fuzzy`
+- [ ] `geo_shape`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `match_all`
+- [x] `mlt`
+- [x] `mlt_field`
+- [x] `nested`
+- [x] `prefix`
+- [x] `query_string`
+- [x] `simple_query_string`
+- [x] `range`
+- [x] `regexp`
+- [ ] `span_first`
+- [ ] `span_multi_term`
+- [ ] `span_near`
+- [ ] `span_not`
+- [ ] `span_or`
+- [ ] `span_term`
+- [x] `term`
+- [x] `terms`
+- [ ] `top_children`
+- [x] `wildcard`
+- [x] `minimum_should_match`
+- [ ] `multi_term_query_rewrite`
+- [x] `template_query`
+
+#### Filters
+
+- [x] `and`
+- [x] `bool`
+- [x] `exists`
+- [ ] `geo_bounding_box`
+- [x] `geo_distance`
+- [ ] `geo_distance_range`
+- [x] `geo_polygon`
+- [ ] `geoshape`
+- [ ] `geohash`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `limit`
+- [x] `match_all`
+- [x] `missing`
+- [x] `nested`
+- [x] `not`
+- [x] `or`
+- [x] `prefix`
+- [x] `query`
+- [x] `range`
+- [x] `regexp`
+- [ ] `script`
+- [x] `term`
+- [x] `terms`
+- [x] `type`
+
+### Facets
+
+- [x] Terms
+- [x] Range
+- [x] Histogram
+- [x] Date Histogram
+- [x] Filter
+- [x] Query
+- [x] Statistical
+- [x] Terms Stats
+- [x] Geo Distance
+
+### Aggregations
+
+- [x] min
+- [x] max
+- [x] sum
+- [x] avg
+- [x] stats
+- [x] extended stats
+- [x] value count
+- [x] percentiles
+- [x] percentile ranks
+- [x] cardinality
+- [x] geo bounds
+- [x] top hits
+- [ ] scripted metric
+- [x] global
+- [x] filter
+- [x] filters
+- [x] missing
+- [x] nested
+- [x] reverse nested
+- [x] children
+- [x] terms
+- [x] significant terms
+- [x] range
+- [x] date range
+- [x] ipv4 range
+- [x] histogram
+- [x] date histogram
+- [x] geo distance
+- [x] geohash grid
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+
+### Scan
+
+Scrolling through documents (e.g. `search_type=scan`) is implemented via
+the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
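+
+For example, here is a minimal sketch of iterating over all documents with the
+`Scan` service, assuming the cursor API described in the wiki (`elastic.EOS`
+marks the end of the stream):
+
+```go
+cursor, err := client.Scan("twitter").Size(100).Do()
+if err != nil {
+    // Handle error
+}
+for {
+    res, err := cursor.Next()
+    if err == elastic.EOS {
+        break // no more documents
+    }
+    if err != nil {
+        // Handle error
+    }
+    // Work with res.Hits.Hits ...
+}
+```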
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot for the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
+
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias.go
new file mode 100644
index 0000000..1bc5a0f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/alias.go
@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+)
+
+type AliasService struct {
+ client *Client
+ actions []aliasAction
+ pretty bool
+}
+
+type aliasAction struct {
+ // "add" or "remove"
+ Type string
+ // Index name
+ Index string
+ // Alias name
+ Alias string
+ // Filter
+ Filter *Filter
+}
+
+func NewAliasService(client *Client) *AliasService {
+ builder := &AliasService{
+ client: client,
+ actions: make([]aliasAction, 0),
+ }
+ return builder
+}
+
+func (s *AliasService) Pretty(pretty bool) *AliasService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
+ action := aliasAction{Type: "add", Index: indexName, Alias: aliasName}
+ s.actions = append(s.actions, action)
+ return s
+}
+
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter *Filter) *AliasService {
+ action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter}
+ s.actions = append(s.actions, action)
+ return s
+}
+
+func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
+ action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName}
+ s.actions = append(s.actions, action)
+ return s
+}
+
+func (s *AliasService) Do() (*AliasResult, error) {
+ // Build url
+ path := "/_aliases"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Actions
+ body := make(map[string]interface{})
+ actionsJson := make([]interface{}, 0)
+
+ for _, action := range s.actions {
+ actionJson := make(map[string]interface{})
+ detailsJson := make(map[string]interface{})
+ detailsJson["index"] = action.Index
+ detailsJson["alias"] = action.Alias
+ if action.Filter != nil {
+ detailsJson["filter"] = (*action.Filter).Source()
+ }
+ actionJson[action.Type] = detailsJson
+ actionsJson = append(actionsJson, actionJson)
+ }
+
+ body["actions"] = actionsJson
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return results
+ ret := new(AliasResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasResult struct {
+ Acknowledged bool `json:"acknowledged"`
+}
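+
+// Example usage (an illustrative sketch; the index and alias names are
+// hypothetical):
+//
+//	_, err := NewAliasService(client).
+//		Add("twitter-2015", "twitter").
+//		Remove("twitter-2014", "twitter").
+//		Do()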
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases.go
new file mode 100644
index 0000000..dddc231
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/aliases.go
@@ -0,0 +1,160 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type AliasesService struct {
+ client *Client
+ indices []string
+ pretty bool
+}
+
+func NewAliasesService(client *Client) *AliasesService {
+ builder := &AliasesService{
+ client: client,
+ indices: make([]string, 0),
+ }
+ return builder
+}
+
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *AliasesService) Index(indexName string) *AliasesService {
+ s.indices = append(s.indices, indexName)
+ return s
+}
+
+func (s *AliasesService) Indices(indexNames ...string) *AliasesService {
+ s.indices = append(s.indices, indexNames...)
+ return s
+}
+
+func (s *AliasesService) Do() (*AliasesResult, error) {
+ var err error
+
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err = uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",")
+
+ // TODO Add types here
+
+ // Search
+ path += "/_aliases"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // {
+ // "indexName" : {
+ // "aliases" : {
+ // "alias1" : { },
+ // "alias2" : { }
+ // }
+ // },
+ // "indexName2" : {
+ // ...
+ // },
+ // }
+ indexMap := make(map[string]interface{})
+ if err := json.Unmarshal(res.Body, &indexMap); err != nil {
+ return nil, err
+ }
+
+ // Each (indexName, _)
+ ret := &AliasesResult{
+ Indices: make(map[string]indexResult),
+ }
+ for indexName, indexData := range indexMap {
+ indexOut, found := ret.Indices[indexName]
+ if !found {
+ indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+ }
+
+ // { "aliases" : { ... } }
+ indexDataMap, ok := indexData.(map[string]interface{})
+ if ok {
+ aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+ if ok {
+ for aliasName := range aliasesData {
+ aliasRes := aliasResult{AliasName: aliasName}
+ indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+ }
+ }
+ }
+
+ ret.Indices[indexName] = indexOut
+ }
+
+ return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasesResult struct {
+ Indices map[string]indexResult
+}
+
+type indexResult struct {
+ Aliases []aliasResult
+}
+
+type aliasResult struct {
+ AliasName string
+}
+
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+ indices := make([]string, 0)
+
+ for indexName, indexInfo := range ar.Indices {
+ for _, aliasInfo := range indexInfo.Aliases {
+ if aliasInfo.AliasName == aliasName {
+ indices = append(indices, indexName)
+ }
+ }
+ }
+
+ return indices
+}
+
+func (ir indexResult) HasAlias(aliasName string) bool {
+ for _, alias := range ir.Aliases {
+ if alias.AliasName == aliasName {
+ return true
+ }
+ }
+ return false
+}
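+
+// Example usage (an illustrative sketch):
+//
+//	res, err := NewAliasesService(client).Indices("twitter-2015", "twitter-2016").Do()
+//	if err == nil {
+//		indices := res.IndicesByAlias("twitter") // index names behind the alias
+//	}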
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk.go
new file mode 100644
index 0000000..90a52b9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk.go
@@ -0,0 +1,301 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type BulkService struct {
+ client *Client
+
+ index string
+ _type string
+ requests []BulkableRequest
+ //replicationType string
+ //consistencyLevel string
+ timeout string
+ refresh *bool
+ pretty bool
+}
+
+func NewBulkService(client *Client) *BulkService {
+ builder := &BulkService{
+ client: client,
+ requests: make([]BulkableRequest, 0),
+ }
+ return builder
+}
+
+func (s *BulkService) reset() {
+ s.requests = make([]BulkableRequest, 0)
+}
+
+func (s *BulkService) Index(index string) *BulkService {
+ s.index = index
+ return s
+}
+
+func (s *BulkService) Type(_type string) *BulkService {
+ s._type = _type
+ return s
+}
+
+func (s *BulkService) Timeout(timeout string) *BulkService {
+ s.timeout = timeout
+ return s
+}
+
+func (s *BulkService) Refresh(refresh bool) *BulkService {
+ s.refresh = &refresh
+ return s
+}
+
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *BulkService) Add(r BulkableRequest) *BulkService {
+ s.requests = append(s.requests, r)
+ return s
+}
+
+func (s *BulkService) NumberOfActions() int {
+ return len(s.requests)
+}
+
+func (s *BulkService) bodyAsString() (string, error) {
+ buf := bytes.NewBufferString("")
+
+ for _, req := range s.requests {
+ source, err := req.Source()
+ if err != nil {
+ return "", err
+ }
+ for _, line := range source {
+ _, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+ if err != nil {
+ return "", nil
+ }
+ }
+ }
+
+ return buf.String(), nil
+}
+
+func (s *BulkService) Do() (*BulkResponse, error) {
+ // No actions?
+ if s.NumberOfActions() == 0 {
+ return nil, errors.New("elastic: No bulk actions to commit")
+ }
+
+ // Get body
+ body, err := s.bodyAsString()
+ if err != nil {
+ return nil, err
+ }
+
+ // Build url
+ path := "/"
+ if s.index != "" {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ path += index + "/"
+ }
+ if s._type != "" {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": s._type,
+ })
+ if err != nil {
+ return nil, err
+ }
+ path += typ + "/"
+ }
+ path += "_bulk"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return results
+ ret := new(BulkResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+
+ // Reset so the request can be reused
+ s.reset()
+
+ return ret, nil
+}
+
+// BulkResponse is a response to a bulk execution.
+//
+// Example:
+// {
+// "took":3,
+// "errors":false,
+// "items":[{
+// "index":{
+// "_index":"index1",
+// "_type":"tweet",
+// "_id":"1",
+// "_version":3,
+// "status":201
+// }
+// },{
+// "index":{
+// "_index":"index2",
+// "_type":"tweet",
+// "_id":"2",
+// "_version":3,
+// "status":200
+// }
+// },{
+// "delete":{
+// "_index":"index1",
+// "_type":"tweet",
+// "_id":"1",
+// "_version":4,
+// "status":200,
+// "found":true
+// }
+// },{
+// "update":{
+// "_index":"index2",
+// "_type":"tweet",
+// "_id":"2",
+// "_version":4,
+// "status":200
+// }
+// }]
+// }
+type BulkResponse struct {
+ Took int `json:"took,omitempty"`
+ Errors bool `json:"errors,omitempty"`
+ Items []map[string]*BulkResponseItem `json:"items,omitempty"`
+}
+
+// BulkResponseItem is the result of a single bulk request.
+type BulkResponseItem struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int `json:"_version,omitempty"`
+ Status int `json:"status,omitempty"`
+ Found bool `json:"found,omitempty"`
+ Error string `json:"error,omitempty"`
+}
+
+// Indexed returns all bulk request results of "index" actions.
+func (r *BulkResponse) Indexed() []*BulkResponseItem {
+ return r.ByAction("index")
+}
+
+// Created returns all bulk request results of "create" actions.
+func (r *BulkResponse) Created() []*BulkResponseItem {
+ return r.ByAction("create")
+}
+
+// Updated returns all bulk request results of "update" actions.
+func (r *BulkResponse) Updated() []*BulkResponseItem {
+ return r.ByAction("update")
+}
+
+// Deleted returns all bulk request results of "delete" actions.
+func (r *BulkResponse) Deleted() []*BulkResponseItem {
+ return r.ByAction("delete")
+}
+
+// ByAction returns all bulk request results of a certain action,
+// e.g. "index" or "delete".
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ items := make([]*BulkResponseItem, 0)
+ for _, item := range r.Items {
+ if result, found := item[action]; found {
+ items = append(items, result)
+ }
+ }
+ return items
+}
+
+// ById returns all bulk request results of a given document id,
+// regardless of the action ("index", "delete" etc.).
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ items := make([]*BulkResponseItem, 0)
+ for _, item := range r.Items {
+ for _, result := range item {
+ if result.Id == id {
+ items = append(items, result)
+ }
+ }
+ }
+ return items
+}
+
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ errors := make([]*BulkResponseItem, 0)
+ for _, item := range r.Items {
+ for _, result := range item {
+ if !(result.Status >= 200 && result.Status <= 299) {
+ errors = append(errors, result)
+ }
+ }
+ }
+ return errors
+}
+
+// Succeeded returns those items of a bulk response that have no errors,
+// i.e. those that have a status code between 200 and 299.
+func (r *BulkResponse) Succeeded() []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ }
+ succeeded := make([]*BulkResponseItem, 0)
+ for _, item := range r.Items {
+ for _, result := range item {
+ if result.Status >= 200 && result.Status <= 299 {
+ succeeded = append(succeeded, result)
+ }
+ }
+ }
+ return succeeded
+}
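+
+// Example usage (an illustrative sketch; tweet is a hypothetical document value):
+//
+//	bulk := NewBulkService(client).Index("twitter").Type("tweet")
+//	bulk.Add(NewBulkIndexRequest().Id("1").Doc(tweet))
+//	bulk.Add(NewBulkDeleteRequest().Id("2"))
+//	res, err := bulk.Do()
+//	if err == nil && res.Errors {
+//		for _, item := range res.Failed() {
+//			_ = item.Error // why the action failed
+//		}
+//	}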
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request.go
new file mode 100644
index 0000000..0ea3722
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_delete_request.go
@@ -0,0 +1,112 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// -- Bulk delete request --
+
+// BulkDeleteRequest is a bulk request to remove a document from Elasticsearch.
+type BulkDeleteRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+ routing string
+ refresh *bool
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+}
+
+func NewBulkDeleteRequest() *BulkDeleteRequest {
+ return &BulkDeleteRequest{}
+}
+
+func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
+ r.index = index
+ return r
+}
+
+func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
+ r.typ = typ
+ return r
+}
+
+func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
+ r.id = id
+ return r
+}
+
+func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
+ r.routing = routing
+ return r
+}
+
+func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
+ r.refresh = &refresh
+ return r
+}
+
+func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
+ r.version = version
+ return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
+ r.versionType = versionType
+ return r
+}
+
+func (r *BulkDeleteRequest) String() string {
+ lines, err := r.Source()
+ if err == nil {
+ return strings.Join(lines, "\n")
+ }
+ return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+ lines := make([]string, 1)
+
+ source := make(map[string]interface{})
+ deleteCommand := make(map[string]interface{})
+ if r.index != "" {
+ deleteCommand["_index"] = r.index
+ }
+ if r.typ != "" {
+ deleteCommand["_type"] = r.typ
+ }
+ if r.id != "" {
+ deleteCommand["_id"] = r.id
+ }
+ if r.routing != "" {
+ deleteCommand["_routing"] = r.routing
+ }
+ if r.version > 0 {
+ deleteCommand["_version"] = r.version
+ }
+ if r.versionType != "" {
+ deleteCommand["_version_type"] = r.versionType
+ }
+ if r.refresh != nil {
+ deleteCommand["refresh"] = *r.refresh
+ }
+ source["delete"] = deleteCommand
+
+ body, err := json.Marshal(source)
+ if err != nil {
+ return nil, err
+ }
+
+ lines[0] = string(body)
+
+ return lines, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request.go
new file mode 100644
index 0000000..4956946
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_index_request.go
@@ -0,0 +1,173 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// BulkIndexRequest is a bulk request to add a document to Elasticsearch.
+type BulkIndexRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+ opType string
+ routing string
+ parent string
+ timestamp string
+ ttl int64
+ refresh *bool
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+ doc interface{}
+}
+
+func NewBulkIndexRequest() *BulkIndexRequest {
+ return &BulkIndexRequest{
+ opType: "index",
+ }
+}
+
+func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
+ r.index = index
+ return r
+}
+
+func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
+ r.typ = typ
+ return r
+}
+
+func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
+ r.id = id
+ return r
+}
+
+func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
+ r.opType = opType
+ return r
+}
+
+func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
+ r.routing = routing
+ return r
+}
+
+func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
+ r.parent = parent
+ return r
+}
+
+func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
+ r.timestamp = timestamp
+ return r
+}
+
+func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
+ r.ttl = ttl
+ return r
+}
+
+func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
+ r.refresh = &refresh
+ return r
+}
+
+func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
+ r.version = version
+ return r
+}
+
+func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
+ r.versionType = versionType
+ return r
+}
+
+func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
+ r.doc = doc
+ return r
+}
+
+func (r *BulkIndexRequest) String() string {
+ lines, err := r.Source()
+ if err == nil {
+ return strings.Join(lines, "\n")
+ }
+ return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkIndexRequest) Source() ([]string, error) {
+ // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+ // { "field1" : "value1" }
+
+ lines := make([]string, 2)
+
+ // "index" ...
+ command := make(map[string]interface{})
+ indexCommand := make(map[string]interface{})
+ if r.index != "" {
+ indexCommand["_index"] = r.index
+ }
+ if r.typ != "" {
+ indexCommand["_type"] = r.typ
+ }
+ if r.id != "" {
+ indexCommand["_id"] = r.id
+ }
+ if r.routing != "" {
+ indexCommand["_routing"] = r.routing
+ }
+ if r.parent != "" {
+ indexCommand["_parent"] = r.parent
+ }
+ if r.timestamp != "" {
+ indexCommand["_timestamp"] = r.timestamp
+ }
+ if r.ttl > 0 {
+ indexCommand["_ttl"] = r.ttl
+ }
+ if r.version > 0 {
+ indexCommand["_version"] = r.version
+ }
+ if r.versionType != "" {
+ indexCommand["_version_type"] = r.versionType
+ }
+ if r.refresh != nil {
+ indexCommand["refresh"] = *r.refresh
+ }
+ command[r.opType] = indexCommand
+ line, err := json.Marshal(command)
+ if err != nil {
+ return nil, err
+ }
+ lines[0] = string(line)
+
+ // "field1" ...
+ if r.doc != nil {
+ switch t := r.doc.(type) {
+ default:
+ body, err := json.Marshal(r.doc)
+ if err != nil {
+ return nil, err
+ }
+ lines[1] = string(body)
+ case json.RawMessage:
+ lines[1] = string(t)
+ case *json.RawMessage:
+ lines[1] = string(*t)
+ case string:
+ lines[1] = t
+ case *string:
+ lines[1] = *t
+ }
+ } else {
+ lines[1] = "{}"
+ }
+
+ return lines, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_request.go
new file mode 100644
index 0000000..315b535
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_request.go
@@ -0,0 +1,17 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// -- Bulkable request (index/update/delete) --
+
+// BulkableRequest is a generic interface to bulkable requests.
+type BulkableRequest interface {
+ fmt.Stringer
+ Source() ([]string, error)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request.go
new file mode 100644
index 0000000..eba9f0d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/bulk_update_request.go
@@ -0,0 +1,244 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// BulkUpdateRequest is a bulk request to update a document in Elasticsearch.
+type BulkUpdateRequest struct {
+ BulkableRequest
+ index string
+ typ string
+ id string
+
+ routing string
+ parent string
+ script string
+ scriptType string
+ scriptLang string
+ scriptParams map[string]interface{}
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+ retryOnConflict *int
+ refresh *bool
+ upsert interface{}
+ docAsUpsert *bool
+ doc interface{}
+ ttl int64
+ timestamp string
+}
+
+func NewBulkUpdateRequest() *BulkUpdateRequest {
+ return &BulkUpdateRequest{}
+}
+
+func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
+ r.index = index
+ return r
+}
+
+func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
+ r.typ = typ
+ return r
+}
+
+func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
+ r.id = id
+ return r
+}
+
+func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
+ r.routing = routing
+ return r
+}
+
+func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
+ r.parent = parent
+ return r
+}
+
+func (r *BulkUpdateRequest) Script(script string) *BulkUpdateRequest {
+ r.script = script
+ return r
+}
+
+func (r *BulkUpdateRequest) ScriptType(scriptType string) *BulkUpdateRequest {
+ r.scriptType = scriptType
+ return r
+}
+
+func (r *BulkUpdateRequest) ScriptLang(scriptLang string) *BulkUpdateRequest {
+ r.scriptLang = scriptLang
+ return r
+}
+
+func (r *BulkUpdateRequest) ScriptParams(params map[string]interface{}) *BulkUpdateRequest {
+ r.scriptParams = params
+ return r
+}
+
+func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
+ r.retryOnConflict = &retryOnConflict
+ return r
+}
+
+func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
+ r.version = version
+ return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
+ r.versionType = versionType
+ return r
+}
+
+func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
+ r.refresh = &refresh
+ return r
+}
+
+func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
+ r.doc = doc
+ return r
+}
+
+func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
+ r.docAsUpsert = &docAsUpsert
+ return r
+}
+
+func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
+ r.upsert = doc
+ return r
+}
+
+func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
+ r.ttl = ttl
+ return r
+}
+
+func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
+ r.timestamp = timestamp
+ return r
+}
+
+func (r *BulkUpdateRequest) String() string {
+ lines, err := r.Source()
+ if err == nil {
+ return strings.Join(lines, "\n")
+ }
+ return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
+ switch t := data.(type) {
+ default:
+ body, err := json.Marshal(data)
+ if err != nil {
+ return "", err
+ }
+ return string(body), nil
+ case json.RawMessage:
+ return string(t), nil
+ case *json.RawMessage:
+ return string(*t), nil
+ case string:
+ return t, nil
+ case *string:
+ return *t, nil
+ }
+}
+
+func (r *BulkUpdateRequest) Source() ([]string, error) {
+ // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+ // { "doc" : { "field1" : "value1", ... } }
+ // or
+ // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+ // { "script" : { ... } }
+
+ lines := make([]string, 2)
+
+ // "update" ...
+ command := make(map[string]interface{})
+ updateCommand := make(map[string]interface{})
+ if r.index != "" {
+ updateCommand["_index"] = r.index
+ }
+ if r.typ != "" {
+ updateCommand["_type"] = r.typ
+ }
+ if r.id != "" {
+ updateCommand["_id"] = r.id
+ }
+ if r.routing != "" {
+ updateCommand["_routing"] = r.routing
+ }
+ if r.parent != "" {
+ updateCommand["_parent"] = r.parent
+ }
+ if r.timestamp != "" {
+ updateCommand["_timestamp"] = r.timestamp
+ }
+ if r.ttl > 0 {
+ updateCommand["_ttl"] = r.ttl
+ }
+ if r.version > 0 {
+ updateCommand["_version"] = r.version
+ }
+ if r.versionType != "" {
+ updateCommand["_version_type"] = r.versionType
+ }
+ if r.refresh != nil {
+ updateCommand["refresh"] = *r.refresh
+ }
+ if r.retryOnConflict != nil {
+ updateCommand["_retry_on_conflict"] = *r.retryOnConflict
+ }
+ if r.upsert != nil {
+ updateCommand["upsert"] = r.upsert
+ }
+ command["update"] = updateCommand
+ line, err := json.Marshal(command)
+ if err != nil {
+ return nil, err
+ }
+ lines[0] = string(line)
+
+ // 2nd line: {"doc" : { ... }} or {"script": {...}}
+ source := make(map[string]interface{})
+ if r.docAsUpsert != nil {
+ source["doc_as_upsert"] = *r.docAsUpsert
+ }
+ if r.doc != nil {
+ // {"doc":{...}}
+ source["doc"] = r.doc
+ } else if r.script != "" {
+ // {"script":...}
+ source["script"] = r.script
+ if r.scriptLang != "" {
+ source["lang"] = r.scriptLang
+ }
+ /*
+ if r.scriptType != "" {
+ source["script_type"] = r.scriptType
+ }
+ */
+		if len(r.scriptParams) > 0 {
+ source["params"] = r.scriptParams
+ }
+ }
+ lines[1], err = r.getSourceAsString(source)
+ if err != nil {
+ return nil, err
+ }
+
+ return lines, nil
+}
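As a quick illustration of the two-line format that `Source` produces above, this hedged sketch prints the command and payload lines for a doc-as-upsert update (it assumes the constructor defaults defined earlier in this file):

```go
package main

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	req := elastic.NewBulkUpdateRequest().
		Index("test").
		Type("type1").
		Id("1").
		Doc(map[string]interface{}{"field1": "value1"}).
		DocAsUpsert(true).
		RetryOnConflict(3)

	lines, err := req.Source()
	if err != nil {
		panic(err)
	}
	for _, line := range lines {
		fmt.Println(line)
	}
	// Expected output (json.Marshal sorts map keys):
	// {"update":{"_id":"1","_index":"test","_retry_on_conflict":3,"_type":"type1"}}
	// {"doc":{"field1":"value1"},"doc_as_upsert":true}
}
```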
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize.go
new file mode 100644
index 0000000..6459308
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/canonicalize.go
@@ -0,0 +1,28 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "net/url"
+
+// canonicalize takes a list of URLs and returns their canonicalized form,
+// i.e. it removes anything but scheme, userinfo, host, and port, as well
+// as any trailing slash. Invalid URLs and URLs that do not use the http
+// or https protocol are skipped.
+//
+// Example:
+// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200
+func canonicalize(rawurls ...string) []string {
+ canonicalized := make([]string, 0)
+ for _, rawurl := range rawurls {
+ u, err := url.Parse(rawurl)
+ if err == nil && (u.Scheme == "http" || u.Scheme == "https") {
+ u.Fragment = ""
+ u.Path = ""
+ u.RawQuery = ""
+ canonicalized = append(canonicalized, u.String())
+ }
+ }
+ return canonicalized
+}
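Because `canonicalize` is unexported, its behavior is easiest to pin down with a test inside the `elastic` package. The test below is a hypothetical sketch, not part of the vendored tree:

```go
package elastic

import "testing"

// TestCanonicalize sketches the documented behavior: path, query, and
// fragment are stripped, and invalid or non-HTTP(S) URLs are dropped.
func TestCanonicalize(t *testing.T) {
	got := canonicalize(
		"http://127.0.0.1:9200/path?query=1",
		"https://user:pass@host:9200/#frag",
		"ftp://host:21", // skipped: neither http nor https
		"http://%zz",    // skipped: invalid URL escape
	)
	want := []string{
		"http://127.0.0.1:9200",
		"https://user:pass@host:9200",
	}
	if len(got) != len(want) {
		t.Fatalf("got %v, want %v", got, want)
	}
	for i := range want {
		if got[i] != want[i] {
			t.Errorf("got %q, want %q", got[i], want[i])
		}
	}
}
```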
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll.go
new file mode 100644
index 0000000..13ac771
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/clear_scroll.go
@@ -0,0 +1,96 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// ClearScrollService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-request-scroll.html.
+type ClearScrollService struct {
+ client *Client
+ pretty bool
+ scrollId []string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewClearScrollService creates a new ClearScrollService.
+func NewClearScrollService(client *Client) *ClearScrollService {
+ return &ClearScrollService{
+ client: client,
+ scrollId: make([]string, 0),
+ }
+}
+
+// ScrollId sets the list of scroll IDs to clear.
+// Use _all to clear all search contexts.
+func (s *ClearScrollService) ScrollId(scrollId ...string) *ClearScrollService {
+ s.scrollId = make([]string, 0)
+ s.scrollId = append(s.scrollId, scrollId...)
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClearScrollService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/_search/scroll", map[string]string{})
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ return path, url.Values{}, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClearScrollService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ body := strings.Join(s.scrollId, ",")
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("DELETE", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClearScrollResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClearScrollResponse is the response of ClearScrollService.Do.
+type ClearScrollResponse struct {
+}
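A hedged usage sketch for the service above; it assumes a reachable node at DefaultURL and uses only methods defined in this vendored tree:

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Pass "_all" to clear every open search context on the cluster.
	if _, err := client.ClearScroll().ScrollId("_all").Do(); err != nil {
		log.Fatal(err)
	}
	log.Println("scroll contexts cleared")
}
```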
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client.go
new file mode 100644
index 0000000..8e899cd
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/client.go
@@ -0,0 +1,1291 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // Version is the current version of Elastic.
+ Version = "2.0.12"
+
+	// DefaultURL is the default endpoint of Elasticsearch on the local machine.
+ // It is used e.g. when initializing a new Client without a specific URL.
+ DefaultURL = "http://127.0.0.1:9200"
+
+ // DefaultScheme is the default protocol scheme to use when sniffing
+ // the Elasticsearch cluster.
+ DefaultScheme = "http"
+
+ // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
+ DefaultHealthcheckEnabled = true
+
+ // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+ // for a response from Elasticsearch on startup, i.e. when creating a
+ // client. After the client is started, a shorter timeout is commonly used
+ // (its default is specified in DefaultHealthcheckTimeout).
+ DefaultHealthcheckTimeoutStartup = 5 * time.Second
+
+ // DefaultHealthcheckTimeout specifies the time a running client waits for
+ // a response from Elasticsearch. Notice that the healthcheck timeout
+ // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+ DefaultHealthcheckTimeout = 1 * time.Second
+
+ // DefaultHealthcheckInterval is the default interval between
+ // two health checks of the nodes in the cluster.
+ DefaultHealthcheckInterval = 60 * time.Second
+
+ // DefaultSnifferEnabled specifies if the sniffer is enabled by default.
+ DefaultSnifferEnabled = true
+
+ // DefaultSnifferInterval is the interval between two sniffing procedures,
+ // i.e. the lookup of all nodes in the cluster and their addition/removal
+ // from the list of actual connections.
+ DefaultSnifferInterval = 15 * time.Minute
+
+ // DefaultSnifferTimeoutStartup is the default timeout for the sniffing
+ // process that is initiated while creating a new client. For subsequent
+ // sniffing processes, DefaultSnifferTimeout is used (by default).
+ DefaultSnifferTimeoutStartup = 5 * time.Second
+
+ // DefaultSnifferTimeout is the default timeout after which the
+ // sniffing process times out. Notice that for the initial sniffing
+ // process, DefaultSnifferTimeoutStartup is used.
+ DefaultSnifferTimeout = 2 * time.Second
+
+	// DefaultMaxRetries is the number of retries of a single request after
+	// which Elastic gives up and returns an error. It is zero by default,
+	// so retrying is disabled by default.
+ DefaultMaxRetries = 0
+)
+
+var (
+ // ErrNoClient is raised when no Elasticsearch node is available.
+ ErrNoClient = errors.New("no Elasticsearch node available")
+
+ // ErrRetry is raised when a request cannot be executed after the configured
+ // number of retries.
+ ErrRetry = errors.New("cannot connect after several retries")
+
+ // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
+ // didn't return in time.
+ ErrTimeout = errors.New("timeout")
+)
+
+// ClientOptionFunc is a function that configures a Client.
+// It is used in NewClient.
+type ClientOptionFunc func(*Client) error
+
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+ c *http.Client // net/http Client to use for requests
+
+ connsMu sync.RWMutex // connsMu guards the next block
+ conns []*conn // all connections
+ cindex int // index into conns
+
+ mu sync.RWMutex // guards the next block
+ urls []string // set of URLs passed initially to the client
+ running bool // true if the client's background processes are running
+ errorlog *log.Logger // error log for critical messages
+ infolog *log.Logger // information log for e.g. response times
+ tracelog *log.Logger // trace log for debugging
+ maxRetries int // max. number of retries
+ scheme string // http or https
+ healthcheckEnabled bool // healthchecks enabled or disabled
+ healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
+ healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch
+ healthcheckInterval time.Duration // interval between healthchecks
+ healthcheckStop chan bool // notify healthchecker to stop, and notify back
+ snifferEnabled bool // sniffer enabled or disabled
+ snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
+ snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
+ snifferInterval time.Duration // interval between sniffing
+ snifferStop chan bool // notify sniffer to stop, and notify back
+ decoder Decoder // used to decode data sent from Elasticsearch
+}
+
+// NewClient creates a new client to work with Elasticsearch.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+// client, err := elastic.NewClient(
+// elastic.SetURL("http://localhost:9200", "http://localhost:9201"),
+// elastic.SetMaxRetries(10))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// for passing only URLs of nodes that belong to the same cluster.
+// This sniffing process runs on startup and periodically afterwards.
+// Use SnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// retry up to a maximum number of retries configured with SetMaxRetries.
+// Retries are disabled by default.
+//
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for
+// advanced scenarios.
+//
+// An error is also returned when some configuration option is invalid or
+// the new client cannot sniff the cluster (if enabled).
+func NewClient(options ...ClientOptionFunc) (*Client, error) {
+ // Set up the client
+ c := &Client{
+ c: http.DefaultClient,
+ conns: make([]*conn, 0),
+ cindex: -1,
+ scheme: DefaultScheme,
+ decoder: &DefaultDecoder{},
+ maxRetries: DefaultMaxRetries,
+ healthcheckEnabled: DefaultHealthcheckEnabled,
+ healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
+ healthcheckTimeout: DefaultHealthcheckTimeout,
+ healthcheckInterval: DefaultHealthcheckInterval,
+ healthcheckStop: make(chan bool),
+ snifferEnabled: DefaultSnifferEnabled,
+ snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
+ snifferTimeout: DefaultSnifferTimeout,
+ snifferInterval: DefaultSnifferInterval,
+ snifferStop: make(chan bool),
+ }
+
+ // Run the options on it
+ for _, option := range options {
+ if err := option(c); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(c.urls) == 0 {
+ c.urls = []string{DefaultURL}
+ }
+ c.urls = canonicalize(c.urls...)
+
+ // Check if we can make a request to any of the specified URLs
+ if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil {
+ return nil, err
+ }
+
+ if c.snifferEnabled {
+ // Sniff the cluster initially
+ if err := c.sniff(c.snifferTimeoutStartup); err != nil {
+ return nil, err
+ }
+ } else {
+ // Do not sniff the cluster initially. Use the provided URLs instead.
+ for _, url := range c.urls {
+ c.conns = append(c.conns, newConn(url, url))
+ }
+ }
+
+ // Perform an initial health check and
+ // ensure that we have at least one connection available
+ c.healthcheck(c.healthcheckTimeoutStartup, true)
+ if err := c.mustActiveConn(); err != nil {
+ return nil, err
+ }
+
+ go c.sniffer() // periodically update cluster information
+ go c.healthchecker() // start goroutine periodically ping all nodes of the cluster
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ return c, nil
+}
+
+// SetHttpClient can be used to specify the http.Client to use when making
+// HTTP requests to Elasticsearch.
+func SetHttpClient(httpClient *http.Client) ClientOptionFunc {
+ return func(c *Client) error {
+ if httpClient != nil {
+ c.c = httpClient
+ } else {
+ c.c = http.DefaultClient
+ }
+ return nil
+ }
+}
+
+// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
+// when sniffing is enabled, these URLs are used to initially sniff the
+// cluster on startup.
+func SetURL(urls ...string) ClientOptionFunc {
+ return func(c *Client) error {
+ switch len(urls) {
+ case 0:
+ c.urls = []string{DefaultURL}
+ default:
+ c.urls = urls
+ }
+ return nil
+ }
+}
+
+// SetScheme sets the HTTP scheme to look for when sniffing (http or https).
+// This is http by default.
+func SetScheme(scheme string) ClientOptionFunc {
+ return func(c *Client) error {
+ c.scheme = scheme
+ return nil
+ }
+}
+
+// SetSniff enables or disables the sniffer (enabled by default).
+func SetSniff(enabled bool) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferEnabled = enabled
+ return nil
+ }
+}
+
+// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used
+// when creating a new client. The default is 5 seconds. Notice that the
+// timeout being used for subsequent sniffing processes is set with
+// SetSnifferTimeout.
+func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferTimeoutStartup = timeout
+ return nil
+ }
+}
+
+// SetSnifferTimeout sets the timeout for the sniffer that finds the
+// nodes in a cluster. The default is 2 seconds. Notice that the timeout
+// used when creating a new client on startup is usually greater and can
+// be set with SetSnifferTimeoutStartup.
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferTimeout = timeout
+ return nil
+ }
+}
+
+// SetSnifferInterval sets the interval between two sniffing processes.
+// The default interval is 15 minutes.
+func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.snifferInterval = interval
+ return nil
+ }
+}
+
+// SetHealthcheck enables or disables healthchecks (enabled by default).
+func SetHealthcheck(enabled bool) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckEnabled = enabled
+ return nil
+ }
+}
+
+// SetHealthcheckTimeoutStartup sets the timeout for the initial health check.
+// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup).
+// Notice that timeouts for subsequent health checks can be modified with
+// SetHealthcheckTimeout.
+func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckTimeoutStartup = timeout
+ return nil
+ }
+}
+
+// SetHealthcheckTimeout sets the timeout for periodic health checks.
+// The default timeout is 1 second (see DefaultHealthcheckTimeout).
+// Notice that a different (usually larger) timeout is used for the initial
+// healthcheck, which is initiated while creating a new client.
+// The startup timeout can be modified with SetHealthcheckTimeoutStartup.
+func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckTimeout = timeout
+ return nil
+ }
+}
+
+// SetHealthcheckInterval sets the interval between two health checks.
+// The default interval is 60 seconds.
+func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
+ return func(c *Client) error {
+ c.healthcheckInterval = interval
+ return nil
+ }
+}
+
+// SetMaxRetries sets the maximum number of retries before giving up when
+// performing an HTTP request to Elasticsearch.
+func SetMaxRetries(maxRetries int) func(*Client) error {
+ return func(c *Client) error {
+ if maxRetries < 0 {
+ return errors.New("MaxRetries must be greater than or equal to 0")
+ }
+ c.maxRetries = maxRetries
+ return nil
+ }
+}
+
+// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
+// DefaultDecoder is used by default.
+func SetDecoder(decoder Decoder) func(*Client) error {
+ return func(c *Client) error {
+ if decoder != nil {
+ c.decoder = decoder
+ } else {
+ c.decoder = &DefaultDecoder{}
+ }
+ return nil
+ }
+}
+
+// SetErrorLog sets the logger for critical messages like nodes joining
+// or leaving the cluster or failing requests. It is nil by default.
+func SetErrorLog(logger *log.Logger) func(*Client) error {
+ return func(c *Client) error {
+ c.errorlog = logger
+ return nil
+ }
+}
+
+// SetInfoLog sets the logger for informational messages, e.g. requests
+// and their response times. It is nil by default.
+func SetInfoLog(logger *log.Logger) func(*Client) error {
+ return func(c *Client) error {
+ c.infolog = logger
+ return nil
+ }
+}
+
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger *log.Logger) func(*Client) error {
+ return func(c *Client) error {
+ c.tracelog = logger
+ return nil
+ }
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+ c.connsMu.Lock()
+ conns := c.conns
+ c.connsMu.Unlock()
+
+ var buf bytes.Buffer
+ for i, conn := range conns {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(conn.String())
+ }
+ return buf.String()
+}
+
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.running
+}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+ c.mu.RLock()
+ if c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ go c.sniffer()
+ go c.healthchecker()
+
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+
+ c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+ c.mu.RLock()
+ if !c.running {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ c.healthcheckStop <- true
+ <-c.healthcheckStop
+
+ c.snifferStop <- true
+ <-c.snifferStop
+
+ c.mu.Lock()
+ c.running = false
+ c.mu.Unlock()
+
+ c.infof("elastic: client stopped")
+}
+
+// errorf logs to the error log.
+func (c *Client) errorf(format string, args ...interface{}) {
+ if c.errorlog != nil {
+ c.errorlog.Printf(format, args...)
+ }
+}
+
+// infof logs informational messages.
+func (c *Client) infof(format string, args ...interface{}) {
+ if c.infolog != nil {
+ c.infolog.Printf(format, args...)
+ }
+}
+
+// tracef logs to the trace log.
+func (c *Client) tracef(format string, args ...interface{}) {
+ if c.tracelog != nil {
+ c.tracelog.Printf(format, args...)
+ }
+}
+
+// dumpRequest dumps the given HTTP request to the trace log.
+func (c *Client) dumpRequest(r *http.Request) {
+ if c.tracelog != nil {
+ out, err := httputil.DumpRequestOut(r, true)
+ if err == nil {
+ c.tracef("%s\n", string(out))
+ }
+ }
+}
+
+// dumpResponse dumps the given HTTP response to the trace log.
+func (c *Client) dumpResponse(resp *http.Response) {
+ if c.tracelog != nil {
+ out, err := httputil.DumpResponse(resp, true)
+ if err == nil {
+ c.tracef("%s\n", string(out))
+ }
+ }
+}
+
+// sniffer periodically runs sniff.
+func (c *Client) sniffer() {
+ for {
+ c.mu.RLock()
+ timeout := c.snifferTimeout
+ ticker := time.After(c.snifferInterval)
+ c.mu.RUnlock()
+
+ select {
+ case <-c.snifferStop:
+ // we are asked to stop, so we signal back that we're stopping now
+ c.snifferStop <- true
+ return
+ case <-ticker:
+ c.sniff(timeout)
+ }
+ }
+}
+
+// sniff uses the Nodes Info API to return the list of nodes in the cluster.
+// It uses the list of URLs passed on startup plus the list of URLs found
+// by the preceding sniffing process (if sniffing is enabled).
+//
+// If sniffing is disabled, this is a no-op.
+func (c *Client) sniff(timeout time.Duration) error {
+ c.mu.RLock()
+ if !c.snifferEnabled {
+ c.mu.RUnlock()
+ return nil
+ }
+
+ // Use all available URLs provided to sniff the cluster.
+ urlsMap := make(map[string]bool)
+ urls := make([]string, 0)
+
+ // Add all URLs provided on startup
+ for _, url := range c.urls {
+ urlsMap[url] = true
+ urls = append(urls, url)
+ }
+ c.mu.RUnlock()
+
+ // Add all URLs found by sniffing
+ c.connsMu.RLock()
+ for _, conn := range c.conns {
+ if !conn.IsDead() {
+ url := conn.URL()
+ if _, found := urlsMap[url]; !found {
+ urls = append(urls, url)
+ }
+ }
+ }
+ c.connsMu.RUnlock()
+
+ if len(urls) == 0 {
+ return ErrNoClient
+ }
+
+ // Start sniffing on all found URLs
+ ch := make(chan []*conn, len(urls))
+ for _, url := range urls {
+ go func(url string) { ch <- c.sniffNode(url) }(url)
+ }
+
+ // Wait for the results to come back, or the process times out.
+ for {
+ select {
+ case conns := <-ch:
+ if len(conns) > 0 {
+ c.updateConns(conns)
+ return nil
+ }
+ case <-time.After(timeout):
+ // We get here if no cluster responds in time
+ return ErrNoClient
+ }
+ }
+}
+
+// reSniffHostAndPort is used to extract the hostname and port from an
+// address reported by the Nodes Info API (example: "inet[/127.0.0.1:9200]").
+var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
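+// For example, reSniffHostAndPort.FindStringSubmatch("inet[/127.0.0.1:9200]")
+// returns a 3-element slice with the host "127.0.0.1" in m[1] and the
+// port "9200" in m[2].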
+
+// sniffNode sniffs a single node. This method is run as a goroutine
+// in sniff. If successful, it returns the list of node URLs extracted
+// from the result of calling the Nodes Info API. Otherwise, an empty
+// slice is returned.
+func (c *Client) sniffNode(url string) []*conn {
+ nodes := make([]*conn, 0)
+
+ // Call the Nodes Info API at /_nodes/http
+ req, err := NewRequest("GET", url+"/_nodes/http")
+ if err != nil {
+ return nodes
+ }
+
+ res, err := c.c.Do((*http.Request)(req))
+ if err != nil {
+ return nodes
+ }
+ if res == nil {
+ return nodes
+ }
+
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+
+ var info NodesInfoResponse
+ if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
+ if len(info.Nodes) > 0 {
+ switch c.scheme {
+ case "https":
+ for nodeID, node := range info.Nodes {
+ m := reSniffHostAndPort.FindStringSubmatch(node.HTTPSAddress)
+ if len(m) == 3 {
+ url := fmt.Sprintf("https://%s:%s", m[1], m[2])
+ nodes = append(nodes, newConn(nodeID, url))
+ }
+ }
+ default:
+ for nodeID, node := range info.Nodes {
+ m := reSniffHostAndPort.FindStringSubmatch(node.HTTPAddress)
+ if len(m) == 3 {
+ url := fmt.Sprintf("http://%s:%s", m[1], m[2])
+ nodes = append(nodes, newConn(nodeID, url))
+ }
+ }
+ }
+ }
+ }
+ return nodes
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+ c.connsMu.Lock()
+
+ newConns := make([]*conn, 0)
+
+ // Build up new connections:
+ // If we find an existing connection, use that (including no. of failures etc.).
+ // If we find a new connection, add it.
+ for _, conn := range conns {
+ var found bool
+ for _, oldConn := range c.conns {
+ if oldConn.NodeID() == conn.NodeID() {
+ // Take over the old connection
+ newConns = append(newConns, oldConn)
+ found = true
+ break
+ }
+ }
+ if !found {
+ // New connection didn't exist, so add it to our list of new conns.
+ c.errorf("elastic: %s joined the cluster", conn.URL())
+ newConns = append(newConns, conn)
+ }
+ }
+
+ c.conns = newConns
+ c.cindex = -1
+ c.connsMu.Unlock()
+}
+
+// healthchecker periodically runs healthcheck.
+func (c *Client) healthchecker() {
+ for {
+ c.mu.RLock()
+ timeout := c.healthcheckTimeout
+ ticker := time.After(c.healthcheckInterval)
+ c.mu.RUnlock()
+
+ select {
+ case <-c.healthcheckStop:
+ // we are asked to stop, so we signal back that we're stopping now
+ c.healthcheckStop <- true
+ return
+ case <-ticker:
+ c.healthcheck(timeout, false)
+ }
+ }
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled this is a no-op.
+// The timeout specifies how long to wait for a response from Elasticsearch.
+func (c *Client) healthcheck(timeout time.Duration, force bool) {
+ c.mu.RLock()
+ if !c.healthcheckEnabled && !force {
+ c.mu.RUnlock()
+ return
+ }
+ c.mu.RUnlock()
+
+ c.connsMu.RLock()
+ conns := c.conns
+ c.connsMu.RUnlock()
+
+ timeoutInMillis := int64(timeout / time.Millisecond)
+
+ for _, conn := range conns {
+ params := make(url.Values)
+ params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis))
+ req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode())
+ if err == nil {
+ res, err := c.c.Do((*http.Request)(req))
+ if err == nil {
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+ if res.StatusCode >= 200 && res.StatusCode < 300 {
+ conn.MarkAsAlive()
+ } else {
+ conn.MarkAsDead()
+ c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode)
+ }
+ } else {
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ }
+ } else {
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ }
+ }
+}
+
+// startupHealthcheck is used at startup to check if the server is available
+// at all.
+func (c *Client) startupHealthcheck(timeout time.Duration) error {
+ c.mu.Lock()
+ urls := c.urls
+ c.mu.Unlock()
+
+ // If we don't get a connection after "timeout", we bail.
+ start := time.Now()
+ for {
+ cl := &http.Client{Timeout: timeout}
+ for _, url := range urls {
+ res, err := cl.Head(url)
+ if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 {
+ return nil
+ }
+ }
+ time.Sleep(1 * time.Second)
+		if time.Since(start) > timeout {
+ break
+ }
+ }
+ return ErrNoClient
+}
+
+// next returns the next available connection, or ErrNoClient.
+func (c *Client) next() (*conn, error) {
+ // We do round-robin here.
+ // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
+ c.connsMu.Lock()
+ defer c.connsMu.Unlock()
+
+ i := 0
+ numConns := len(c.conns)
+ for {
+ i += 1
+ if i > numConns {
+ break // we visited all conns: they all seem to be dead
+ }
+ c.cindex += 1
+ if c.cindex >= numConns {
+ c.cindex = 0
+ }
+ conn := c.conns[c.cindex]
+ if !conn.IsDead() {
+ return conn, nil
+ }
+ }
+
+ // TODO(oe) As a last resort, we could try to awake a dead connection here.
+
+ // We tried hard, but there is no node available
+ return nil, ErrNoClient
+}
+
+// mustActiveConn returns nil if there is an active connection,
+// otherwise ErrNoClient is returned.
+func (c *Client) mustActiveConn() error {
+ c.connsMu.Lock()
+ defer c.connsMu.Unlock()
+
+ for _, c := range c.conns {
+ if !c.IsDead() {
+ return nil
+ }
+ }
+ return ErrNoClient
+}
+
+// PerformRequest does an HTTP request to Elasticsearch.
+// It returns the response on success, or an error on failure.
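+//
+// A hedged usage sketch (most callers go through the typed services in
+// this package rather than calling PerformRequest directly):
+//
+//	res, err := client.PerformRequest("GET", "/", url.Values{}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("HTTP %d: %s\n", res.StatusCode, res.Body)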
+func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}) (*Response, error) {
+ start := time.Now().UTC()
+
+ c.mu.RLock()
+ timeout := c.healthcheckTimeout
+ retries := c.maxRetries
+ c.mu.RUnlock()
+
+ var err error
+ var conn *conn
+ var req *Request
+ var resp *Response
+ var retried bool
+
+ // We wait between retries, using simple exponential back-off.
+ // TODO: Make this configurable, including the jitter.
+ retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+
+ for {
+ pathWithParams := path
+ if len(params) > 0 {
+ pathWithParams += "?" + params.Encode()
+ }
+
+ // Get a connection
+ conn, err = c.next()
+ if err == ErrNoClient {
+ if !retried {
+				// Force a healthcheck as all connections seem to be dead.
+ c.healthcheck(timeout, false)
+ }
+ retries -= 1
+ if retries <= 0 {
+ return nil, err
+ }
+ retried = true
+ time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+ retryWaitMsec += retryWaitMsec
+ continue // try again
+ }
+ if err != nil {
+ c.errorf("elastic: cannot get connection from pool")
+ return nil, err
+ }
+
+ req, err = NewRequest(method, conn.URL()+pathWithParams)
+ if err != nil {
+ c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+ return nil, err
+ }
+
+ // Set body
+ if body != nil {
+			switch b := body.(type) {
+			case string:
+				req.SetBodyString(b)
+			default:
+				req.SetBodyJson(body)
+			}
+ }
+
+ // Tracing
+ c.dumpRequest((*http.Request)(req))
+
+ // Get response
+ res, err := c.c.Do((*http.Request)(req))
+ if err != nil {
+ retries -= 1
+ if retries <= 0 {
+ c.errorf("elastic: %s is dead", conn.URL())
+ conn.MarkAsDead()
+ return nil, err
+ }
+ retried = true
+ time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+ retryWaitMsec += retryWaitMsec
+ continue // try again
+ }
+ if res.Body != nil {
+ defer res.Body.Close()
+ }
+
+ // Check for errors
+ if err := checkResponse(res); err != nil {
+ retries -= 1
+ if retries <= 0 {
+ return nil, err
+ }
+ retried = true
+ time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+ retryWaitMsec += retryWaitMsec
+ continue // try again
+ }
+
+ // Tracing
+ c.dumpResponse(res)
+
+ // We successfully made a request with this connection
+ conn.MarkAsHealthy()
+
+ resp, err = c.newResponse(res)
+ if err != nil {
+ return nil, err
+ }
+
+ break
+ }
+
+ duration := time.Now().UTC().Sub(start)
+ c.infof("%s %s [status:%d, request:%.3fs]",
+ strings.ToUpper(method),
+ req.URL,
+ resp.StatusCode,
+ float64(int64(duration/time.Millisecond))/1000)
+
+ return resp, nil
+}
+
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+ res, _, err := c.Ping().URL(url).Do()
+ if err != nil {
+ return "", err
+ }
+ return res.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster.
+func (c *Client) IndexNames() ([]string, error) {
+ res, err := c.IndexGetSettings().Index("_all").Do()
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+	for name := range res {
+ names = append(names, name)
+ }
+ return names, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+func (c *Client) Ping() *PingService {
+ return NewPingService(c)
+}
+
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *CreateIndexService {
+ builder := NewCreateIndexService(c)
+ builder.Index(name)
+ return builder
+}
+
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(name string) *DeleteIndexService {
+ builder := NewDeleteIndexService(c)
+ builder.Index(name)
+ return builder
+}
+
+// IndexExists checks whether an index exists.
+func (c *Client) IndexExists(name string) *IndexExistsService {
+ builder := NewIndexExistsService(c)
+ builder.Index(name)
+ return builder
+}
+
+// TypeExists checks whether one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+ return NewIndicesExistsTypeService(c)
+}
+
+// IndexStats provides statistics on the different operations happening
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+ builder := NewIndicesStatsService(c)
+ builder = builder.Index(indices...)
+ return builder
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *OpenIndexService {
+ builder := NewOpenIndexService(c)
+ builder.Index(name)
+ return builder
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *CloseIndexService {
+ builder := NewCloseIndexService(c)
+ builder.Index(name)
+ return builder
+}
+
+// Index a document.
+func (c *Client) Index() *IndexService {
+ builder := NewIndexService(c)
+ return builder
+}
+
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet() *IndicesGetService {
+ builder := NewIndicesGetService(c)
+ return builder
+}
+
+// IndexGetSettings retrieves settings about one or more indices.
+func (c *Client) IndexGetSettings() *IndicesGetSettingsService {
+ builder := NewIndicesGetSettingsService(c)
+ return builder
+}
+
+// Update a document.
+func (c *Client) Update() *UpdateService {
+ builder := NewUpdateService(c)
+ return builder
+}
+
+// Delete a document.
+func (c *Client) Delete() *DeleteService {
+ builder := NewDeleteService(c)
+ return builder
+}
+
+// DeleteByQuery deletes documents as found by a query.
+func (c *Client) DeleteByQuery() *DeleteByQueryService {
+ builder := NewDeleteByQueryService(c)
+ return builder
+}
+
+// Get a document.
+func (c *Client) Get() *GetService {
+ builder := NewGetService(c)
+ return builder
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MultiGetService {
+ builder := NewMultiGetService(c)
+ return builder
+}
+
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+ builder := NewExistsService(c)
+ return builder
+}
+
+// Count documents.
+func (c *Client) Count(indices ...string) *CountService {
+ builder := NewCountService(c)
+ builder.Indices(indices...)
+ return builder
+}
+
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+ builder := NewSearchService(c)
+ builder.Indices(indices...)
+ return builder
+}
+
+// Percolate sends a document and returns queries that match it.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html.
+func (c *Client) Percolate() *PercolateService {
+ builder := NewPercolateService(c)
+ return builder
+}
+
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+ return NewMultiSearchService(c)
+}
+
+// Suggest returns a service to return suggestions.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+ builder := NewSuggestService(c)
+ builder.Indices(indices...)
+ return builder
+}
+
+// Scan through documents. Use this to iterate inside a server process
+// where the results will be processed without returning them to a client.
+func (c *Client) Scan(indices ...string) *ScanService {
+ builder := NewScanService(c)
+ builder.Indices(indices...)
+ return builder
+}
+
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client. Use Scan when you don't need
+// to return requests to a client (i.e. not paginating via request/response).
+func (c *Client) Scroll(indices ...string) *ScrollService {
+ builder := NewScrollService(c)
+ builder.Indices(indices...)
+ return builder
+}
+
+// ClearScroll can be used to clear search contexts manually.
+func (c *Client) ClearScroll() *ClearScrollService {
+ builder := NewClearScrollService(c)
+ return builder
+}
+
+// Optimize asks Elasticsearch to optimize one or more indices.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+ builder := NewOptimizeService(c)
+ builder.Indices(indices...)
+ return builder
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+ builder := NewRefreshService(c)
+ builder.Indices(indices...)
+ return builder
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush() *FlushService {
+ builder := NewFlushService(c)
+ return builder
+}
+
+// Explain computes a score explanation for a query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+ builder := NewExplainService(c)
+ builder = builder.Index(index).Type(typ).Id(id)
+ return builder
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+ builder := NewBulkService(c)
+ return builder
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+ builder := NewAliasService(c)
+ return builder
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+ builder := NewAliasesService(c)
+ return builder
+}
+
+// GetTemplate gets a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+ return NewGetTemplateService(c)
+}
+
+// PutTemplate creates or updates a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) PutTemplate() *PutTemplateService {
+ return NewPutTemplateService(c)
+}
+
+// DeleteTemplate deletes a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+ return NewDeleteTemplateService(c)
+}
+
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+ builder := NewIndicesGetTemplateService(c)
+ builder = builder.Name(names...)
+ return builder
+}
+
+// IndexTemplateExists checks whether an index template exists.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+ builder := NewIndicesExistsTemplateService(c)
+ builder = builder.Name(name)
+ return builder
+}
+
+// IndexPutTemplate creates or updates an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+ builder := NewIndicesPutTemplateService(c)
+ builder = builder.Name(name)
+ return builder
+}
+
+// IndexDeleteTemplate deletes an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+ builder := NewIndicesDeleteTemplateService(c)
+ builder = builder.Name(name)
+ return builder
+}
+
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *GetMappingService {
+ return NewGetMappingService(c)
+}
+
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *PutMappingService {
+ return NewPutMappingService(c)
+}
+
+// DeleteMapping deletes a mapping.
+func (c *Client) DeleteMapping() *DeleteMappingService {
+ return NewDeleteMappingService(c)
+}
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+ return NewClusterHealthService(c)
+}
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+ return NewClusterStateService(c)
+}
+
+// ClusterStats retrieves cluster statistics.
+func (c *Client) ClusterStats() *ClusterStatsService {
+ return NewClusterStatsService(c)
+}
+
+// NodesInfo retrieves information about one, several, or all nodes in the cluster.
+func (c *Client) NodesInfo() *NodesInfoService {
+ return NewNodesInfoService(c)
+}
+
+// Reindex returns a service that will reindex documents from a source
+// index into a target index. See
+// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
+ return NewReindexer(c, sourceIndex, CopyToTargetIndex(targetIndex))
+}
+
+// WaitForStatus waits for the cluster to have the given status.
+// This is a shortcut method for the ClusterHealth service.
+//
+// WaitForStatus waits at most the specified timeout, e.g. "10s".
+// If the cluster reaches the given status within the timeout, nil is returned.
+// If the request times out, ErrTimeout is returned.
+func (c *Client) WaitForStatus(status string, timeout string) error {
+ health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do()
+ if err != nil {
+ return err
+ }
+ if health.TimedOut {
+ return ErrTimeout
+ }
+ return nil
+}
+
+// WaitForGreenStatus waits for the cluster to have the "green" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForGreenStatus(timeout string) error {
+ return c.WaitForStatus("green", timeout)
+}
+
+// WaitForYellowStatus waits for the cluster to have the "yellow" status.
+// See WaitForStatus for more details.
+func (c *Client) WaitForYellowStatus(timeout string) error {
+ return c.WaitForStatus("yellow", timeout)
+}
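Closing out client.go, here is a hedged sketch of the cluster-state helpers above. It assumes a node at DefaultURL; error handling is kept minimal:

```go
package main

import (
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Block until the cluster reports at least "yellow" status,
	// or fail with ErrTimeout after 10 seconds.
	if err := client.WaitForYellowStatus("10s"); err != nil {
		log.Fatal(err)
	}
	log.Println("cluster is ready")
}
```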
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/Makefile b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/Makefile
new file mode 100644
index 0000000..cc6261d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/Makefile
@@ -0,0 +1,16 @@
+.PHONY: build run-omega-cluster-test
+
+default: build
+
+build:
+ go build cluster-test.go
+
+run-omega-cluster-test:
+ go run -race cluster-test.go \
+ -nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
+ -n=5 \
+ -retries=5 \
+ -sniff=true -sniffer=10s \
+ -healthcheck=true -healthchecker=5s \
+ -errorlog=errors.log
+
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/README.md b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/README.md
new file mode 100644
index 0000000..f10748c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/README.md
@@ -0,0 +1,63 @@
+# Cluster Test
+
+This directory contains a program you can use to test a cluster.
+
+Here's how:
+
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+
+Build cluster-test by running `go build cluster-test.go` (or build with `make`).
+
+Run `./cluster-test -h` to get a list of flags:
+
+```sh
+$ ./cluster-test -h
+Usage of ./cluster-test:
+ -errorlog="": error log file
+ -healthcheck=true: enable or disable healthchecks
+ -healthchecker=1m0s: healthcheck interval
+ -index="twitter": name of ES index to use
+ -infolog="": info log file
+ -n=5: number of goroutines that run searches
+ -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
+ -retries=0: number of retries
+ -sniff=true: enable or disable sniffer
+ -sniffer=15m0s: sniffer interval
+ -tracelog="": trace log file
+```
+
+Example:
+
+```sh
+$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
+```
+
+The above example will create an index and start some search jobs on the
+cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
+and http://127.0.0.1:9202.
+
+* It will create an index called `twitter` on the cluster (`-index=twitter`)
+* It will run 5 search jobs in parallel (`-n=5`).
+* It will retry failed requests 5 times (`-retries=5`).
+* It will sniff the cluster periodically (`-sniff=true`).
+* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
+* It will perform health checks periodically (`-healthcheck=true`).
+* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
+* It will write an error log file (`-errorlog=error.log`).
+
+If you want to test Elastic with nodes going up and down, you can use a
+chaos monkey script like this and run it on the nodes of your cluster:
+
+```sh
+#!/bin/bash
+while true
+do
+ echo "Starting ES node"
+ elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
+ sleep `jot -r 1 10 300` # wait for 10-300s
+ echo "Stopping ES node"
+ kill -TERM `cat es.pid`
+ sleep `jot -r 1 10 60` # wait for 10-60s
+done
+```
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/cluster-test.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/cluster-test.go
new file mode 100644
index 0000000..a9ce8bb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster-test/cluster-test.go
@@ -0,0 +1,357 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "os"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ elastic "gopkg.in/olivere/elastic.v2"
+)
+
+type Tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Retweets int `json:"retweets"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
+}
+
+var (
+ nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')")
+ n = flag.Int("n", 5, "number of goroutines that run searches")
+ index = flag.String("index", "twitter", "name of ES index to use")
+ errorlogfile = flag.String("errorlog", "", "error log file")
+ infologfile = flag.String("infolog", "", "info log file")
+ tracelogfile = flag.String("tracelog", "", "trace log file")
+ retries = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries")
+ sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
+ sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
+ healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
+ healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
+)
+
+func main() {
+ flag.Parse()
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ if *nodes == "" {
+ log.Fatal("no nodes specified")
+ }
+	urls := strings.Split(*nodes, ",")
+
+ testcase, err := NewTestCase(*index, urls)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ testcase.SetErrorLogFile(*errorlogfile)
+ testcase.SetInfoLogFile(*infologfile)
+ testcase.SetTraceLogFile(*tracelogfile)
+ testcase.SetMaxRetries(*retries)
+ testcase.SetHealthcheck(*healthcheck)
+ testcase.SetHealthcheckInterval(*healthchecker)
+ testcase.SetSniff(*sniff)
+ testcase.SetSnifferInterval(*sniffer)
+
+ if err := testcase.Run(*n); err != nil {
+ log.Fatal(err)
+ }
+
+ select {}
+}
+
+type RunInfo struct {
+ Success bool
+}
+
+type TestCase struct {
+ nodes []string
+ client *elastic.Client
+ runs int64
+ failures int64
+ runCh chan RunInfo
+ index string
+ errorlogfile string
+ infologfile string
+ tracelogfile string
+ maxRetries int
+ healthcheck bool
+ healthcheckInterval time.Duration
+ sniff bool
+ snifferInterval time.Duration
+}
+
+func NewTestCase(index string, nodes []string) (*TestCase, error) {
+ if index == "" {
+ return nil, errors.New("no index name specified")
+ }
+
+ return &TestCase{
+ index: index,
+ nodes: nodes,
+ runCh: make(chan RunInfo),
+ }, nil
+}
+
+func (t *TestCase) SetIndex(name string) {
+ t.index = name
+}
+
+func (t *TestCase) SetErrorLogFile(name string) {
+ t.errorlogfile = name
+}
+
+func (t *TestCase) SetInfoLogFile(name string) {
+ t.infologfile = name
+}
+
+func (t *TestCase) SetTraceLogFile(name string) {
+ t.tracelogfile = name
+}
+
+func (t *TestCase) SetMaxRetries(n int) {
+ t.maxRetries = n
+}
+
+func (t *TestCase) SetSniff(enabled bool) {
+ t.sniff = enabled
+}
+
+func (t *TestCase) SetSnifferInterval(d time.Duration) {
+ t.snifferInterval = d
+}
+
+func (t *TestCase) SetHealthcheck(enabled bool) {
+ t.healthcheck = enabled
+}
+
+func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
+ t.healthcheckInterval = d
+}
+
+func (t *TestCase) Run(n int) error {
+ if err := t.setup(); err != nil {
+ return err
+ }
+
+	for i := 0; i < n; i++ {
+ go t.search()
+ }
+
+ go t.monitor()
+
+ return nil
+}
+
+func (t *TestCase) monitor() {
+ print := func() {
+ fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ")
+ }
+
+ for {
+ select {
+ case run := <-t.runCh:
+ atomic.AddInt64(&t.runs, 1)
+ if !run.Success {
+ atomic.AddInt64(&t.failures, 1)
+ fmt.Println()
+ }
+ print()
+ case <-time.After(5 * time.Second):
+ // Print stats after some inactivity
+ print()
+ }
+ }
+}
+
+func (t *TestCase) setup() error {
+ var errorlogger *log.Logger
+ if t.errorlogfile != "" {
+ f, err := os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
+ }
+
+ var infologger *log.Logger
+ if t.infologfile != "" {
+ f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ infologger = log.New(f, "", log.LstdFlags)
+ }
+
+ // Trace request and response details like this
+ var tracelogger *log.Logger
+ if t.tracelogfile != "" {
+ f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ if err != nil {
+ return err
+ }
+ tracelogger = log.New(f, "", log.LstdFlags)
+ }
+
+ client, err := elastic.NewClient(
+ elastic.SetURL(t.nodes...),
+ elastic.SetErrorLog(errorlogger),
+ elastic.SetInfoLog(infologger),
+ elastic.SetTraceLog(tracelogger),
+ elastic.SetMaxRetries(t.maxRetries),
+ elastic.SetSniff(t.sniff),
+ elastic.SetSnifferInterval(t.snifferInterval),
+ elastic.SetHealthcheck(t.healthcheck),
+ elastic.SetHealthcheckInterval(t.healthcheckInterval))
+ if err != nil {
+ // Handle error
+ return err
+ }
+ t.client = client
+
+ // Use the IndexExists service to check if a specified index exists.
+ exists, err := t.client.IndexExists(t.index).Do()
+ if err != nil {
+ return err
+ }
+ if exists {
+ deleteIndex, err := t.client.DeleteIndex(t.index).Do()
+ if err != nil {
+ return err
+ }
+ if !deleteIndex.Acknowledged {
+ return errors.New("delete index not acknowledged")
+ }
+ }
+
+ // Create a new index.
+ createIndex, err := t.client.CreateIndex(t.index).Do()
+ if err != nil {
+ return err
+ }
+ if !createIndex.Acknowledged {
+ return errors.New("create index not acknowledged")
+ }
+
+ // Index a tweet (using JSON serialization)
+ tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+ _, err = t.client.Index().
+ Index(t.index).
+ Type("tweet").
+ Id("1").
+ BodyJson(tweet1).
+ Do()
+ if err != nil {
+ return err
+ }
+
+ // Index a second tweet (by string)
+ tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+ _, err = t.client.Index().
+ Index(t.index).
+ Type("tweet").
+ Id("2").
+ BodyString(tweet2).
+ Do()
+ if err != nil {
+ return err
+ }
+
+ // Flush to make sure the documents got written.
+ _, err = t.client.Flush().Index(t.index).Do()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *TestCase) search() {
+ // Loop forever to check for connection issues
+ runLoop:
+ for {
+ // Get tweet with specified ID
+ get1, err := t.client.Get().
+ Index(t.index).
+ Type("tweet").
+ Id("1").
+ Do()
+ if err != nil {
+ //failf("Get failed: %v", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+ if !get1.Found {
+ //log.Printf("Document %s not found\n", "1")
+ //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+
+ // Search with a term query
+ termQuery := elastic.NewTermQuery("user", "olivere")
+ searchResult, err := t.client.Search().
+ Index(t.index). // search in index t.index
+ Query(&termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+ if err != nil {
+ //failf("Search failed: %v\n", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ }
+
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+ // Number of hits
+ if searchResult.Hits != nil {
+ //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var tweet Tweet
+ err := json.Unmarshal(*hit.Source, &tweet)
+ if err != nil {
+ // Deserialization failed
+ //failf("Deserialize failed: %v\n", err)
+ t.runCh <- RunInfo{Success: false}
+ continue runLoop // this run already failed; skip the success report below
+ }
+
+ // Work with tweet
+ //fmt.Printf("Tweet by %s: %s\n", tweet.User, tweet.Message)
+ }
+ } else {
+ // No hits
+ //fmt.Print("Found no tweets\n")
+ }
+
+ t.runCh <- RunInfo{Success: true}
+
+ // Sleep some time
+ time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health.go
new file mode 100644
index 0000000..48a354c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_health.go
@@ -0,0 +1,186 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ClusterHealthService allows getting the status of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-health.html.
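+//
+// A minimal usage sketch (assuming a *Client named client created via
+// elastic.NewClient, reaching this service through client.ClusterHealth()):
+//
+// res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("10s").Do()
+// if err != nil {
+// // Handle error
+// }
+// fmt.Printf("cluster %q is %s\n", res.ClusterName, res.Status)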
+type ClusterHealthService struct {
+ client *Client
+ pretty bool
+ indices []string
+ waitForStatus string
+ level string
+ local *bool
+ masterTimeout string
+ timeout string
+ waitForActiveShards *int
+ waitForNodes string
+ waitForRelocatingShards *int
+}
+
+// NewClusterHealthService creates a new ClusterHealthService.
+func NewClusterHealthService(client *Client) *ClusterHealthService {
+ return &ClusterHealthService{client: client, indices: make([]string, 0)}
+}
+
+// Index limits the information returned to a specific index.
+func (s *ClusterHealthService) Index(index string) *ClusterHealthService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices limits the information returned to specific indices.
+func (s *ClusterHealthService) Indices(indices ...string) *ClusterHealthService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+ s.timeout = timeout
+ return s
+}
+
+// WaitForActiveShards can be used to wait until the specified number of shards are active.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+ s.waitForActiveShards = &waitForActiveShards
+ return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+ s.waitForNodes = waitForNodes
+ return s
+}
+
+// WaitForRelocatingShards can be used to wait until the cluster has the
+// specified number of relocating shards (typically 0, i.e. relocation has finished).
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+ s.waitForRelocatingShards = &waitForRelocatingShards
+ return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+ s.waitForStatus = waitForStatus
+ return s
+}
+
+// Level specifies the level of detail for returned information.
+func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
+ s.level = level
+ return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from the master node (default: false).
+func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
+ s.local = &local
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+ "index": strings.Join(s.indices, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.waitForRelocatingShards != nil {
+ params.Set("wait_for_relocating_shards", fmt.Sprintf("%d", *s.waitForRelocatingShards))
+ }
+ if s.waitForStatus != "" {
+ params.Set("wait_for_status", s.waitForStatus)
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.waitForActiveShards != nil {
+ params.Set("wait_for_active_shards", fmt.Sprintf("%d", *s.waitForActiveShards))
+ }
+ if s.waitForNodes != "" {
+ params.Set("wait_for_nodes", s.waitForNodes)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ resp := new(ClusterHealthResponse)
+ if err := json.Unmarshal(res.Body, resp); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ClusterHealthResponse is the response of ClusterHealthService.Do.
+type ClusterHealthResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Status string `json:"status"`
+ TimedOut bool `json:"timed_out"`
+ NumberOfNodes int `json:"number_of_nodes"`
+ NumberOfDataNodes int `json:"number_of_data_nodes"`
+ ActivePrimaryShards int `json:"active_primary_shards"`
+ ActiveShards int `json:"active_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ InitializingShards int `json:"initializing_shards"`
+ UnassignedShards int `json:"unassigned_shards"`
+ NumberOfPendingTasks int `json:"number_of_pending_tasks"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state.go
new file mode 100644
index 0000000..9361f73
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_state.go
@@ -0,0 +1,197 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ClusterStateService returns the state of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-state.html.
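+//
+// A minimal usage sketch (assuming a *Client named client that exposes this
+// service as client.ClusterState()):
+//
+// res, err := client.ClusterState().Metric("metadata").Index("_all").Do()
+// if err != nil {
+// // Handle error
+// }
+// fmt.Println(res.ClusterName, res.Version)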
+type ClusterStateService struct {
+ client *Client
+ pretty bool
+ indices []string
+ metrics []string
+ local *bool
+ masterTimeout string
+ flatSettings *bool
+}
+
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+ return &ClusterStateService{
+ client: client,
+ indices: make([]string, 0),
+ metrics: make([]string, 0),
+ }
+}
+
+// Index sets the name of the index. Use _all or an empty string to perform
+// the operation on all indices.
+func (s *ClusterStateService) Index(index string) *ClusterStateService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Indices(indices ...string) *ClusterStateService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metric string) *ClusterStateService {
+ s.metrics = make([]string, 0)
+ s.metrics = append(s.metrics, metric)
+ return s
+}
+
+// Metrics limits the information returned to the specified metrics.
+// It can be any of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metrics(metrics ...string) *ClusterStateService {
+ s.metrics = make([]string, 0)
+ s.metrics = append(s.metrics, metrics...)
+ return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from the master node (default: false).
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+ s.local = &local
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ metrics := strings.Join(s.metrics, ",")
+ if metrics == "" {
+ metrics = "_all"
+ }
+ indices := strings.Join(s.indices, ",")
+ if indices == "" {
+ indices = "_all"
+ }
+ path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
+ "metrics": metrics,
+ "indices": indices,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterStateService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClusterStateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClusterStateResponse is the response of ClusterStateService.Do.
+type ClusterStateResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Version int `json:"version"`
+ MasterNode string `json:"master_node"`
+ Blocks map[string]interface{} `json:"blocks"`
+ Nodes map[string]*ClusterStateNode `json:"nodes"`
+ Metadata *ClusterStateMetadata `json:"metadata"`
+ RoutingTable map[string]*ClusterStateRoutingTable `json:"routing_table"`
+ RoutingNodes *ClusterStateRoutingNode `json:"routing_nodes"`
+ Allocations []interface{} `json:"allocations"`
+ Customs map[string]interface{} `json:"customs"`
+}
+
+type ClusterStateMetadata struct {
+ Templates map[string]interface{} `json:"templates"`
+ Indices map[string]interface{} `json:"indices"`
+ Repositories map[string]interface{} `json:"repositories"`
+}
+
+type ClusterStateNode struct {
+ Name string `json:"name"`
+ TransportAddress string `json:"transport_address"`
+ Attributes map[string]interface{} `json:"attributes"`
+
+ // TODO(oe) are these still valid?
+ State string `json:"state"`
+ Primary bool `json:"primary"`
+ Node string `json:"node"`
+ RelocatingNode *string `json:"relocating_node"`
+ Shard int `json:"shard"`
+ Index string `json:"index"`
+}
+
+type ClusterStateRoutingTable struct {
+ Indices map[string]interface{} `json:"indices"`
+}
+
+type ClusterStateRoutingNode struct {
+ Unassigned []interface{} `json:"unassigned"`
+ Nodes map[string]interface{} `json:"nodes"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats.go
new file mode 100644
index 0000000..a3756b9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/cluster_stats.go
@@ -0,0 +1,349 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ClusterStatsService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-stats.html.
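+//
+// A minimal usage sketch (assuming a *Client named client that exposes this
+// service as client.ClusterStats()):
+//
+// res, err := client.ClusterStats().Human(true).Do()
+// if err != nil {
+// // Handle error
+// }
+// fmt.Printf("%d nodes, %d indices\n", res.Nodes.Count.Total, res.Indices.Count)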
+type ClusterStatsService struct {
+ client *Client
+ pretty bool
+ nodeId []string
+ flatSettings *bool
+ human *bool
+}
+
+// NewClusterStatsService creates a new ClusterStatsService.
+func NewClusterStatsService(client *Client) *ClusterStatsService {
+ return &ClusterStatsService{
+ client: client,
+ nodeId: make([]string, 0),
+ }
+}
+
+// NodeId limits the returned information to the given node IDs or names.
+// Use `_local` to return information from the node you're connecting to;
+// leave empty to get information from all nodes.
+func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
+ s.nodeId = nodeId
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+
+ if len(s.nodeId) > 0 {
+ path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ } else {
+ path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
+ if err != nil {
+ return "", url.Values{}, err
+ }
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterStatsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStatsService) Do() (*ClusterStatsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ClusterStatsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ClusterStatsResponse is the response of ClusterStatsService.Do.
+type ClusterStatsResponse struct {
+ Timestamp int64 `json:"timestamp"`
+ ClusterName string `json:"cluster_name"`
+ ClusterUUID string `json:"uuid"`
+ Status string `json:"status"`
+ Indices *ClusterStatsIndices `json:"indices"`
+ Nodes *ClusterStatsNodes `json:"nodes"`
+}
+
+type ClusterStatsIndices struct {
+ Count int `json:"count"`
+ Shards *ClusterStatsIndicesShards `json:"shards"`
+ Docs *ClusterStatsIndicesDocs `json:"docs"`
+ Store *ClusterStatsIndicesStore `json:"store"`
+ FieldData *ClusterStatsIndicesFieldData `json:"fielddata"`
+ FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"`
+ IdCache *ClusterStatsIndicesIdCache `json:"id_cache"`
+ Completion *ClusterStatsIndicesCompletion `json:"completion"`
+ Segments *ClusterStatsIndicesSegments `json:"segments"`
+ Percolate *ClusterStatsIndicesPercolate `json:"percolate"`
+}
+
+type ClusterStatsIndicesShards struct {
+ Total int `json:"total"`
+ Primaries int `json:"primaries"`
+ Replication float64 `json:"replication"`
+ Index *ClusterStatsIndicesShardsIndex `json:"index"`
+}
+
+type ClusterStatsIndicesShardsIndex struct {
+ Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"`
+ Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"`
+ Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"`
+}
+
+type ClusterStatsIndicesShardsIndexIntMinMax struct {
+ Min int `json:"min"`
+ Max int `json:"max"`
+ Avg float64 `json:"avg"`
+}
+
+type ClusterStatsIndicesShardsIndexFloat64MinMax struct {
+ Min float64 `json:"min"`
+ Max float64 `json:"max"`
+ Avg float64 `json:"avg"`
+}
+
+type ClusterStatsIndicesDocs struct {
+ Count int `json:"count"`
+ Deleted int `json:"deleted"`
+}
+
+type ClusterStatsIndicesStore struct {
+ Size string `json:"size"` // e.g. "5.3gb"
+ SizeInBytes int64 `json:"size_in_bytes"`
+ ThrottleTime string `json:"throttle_time"` // e.g. "0s"
+ ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
+}
+
+type ClusterStatsIndicesFieldData struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Evictions int64 `json:"evictions"`
+ Fields map[string]struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ } `json:"fields"`
+}
+
+type ClusterStatsIndicesFilterCache struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+ Evictions int64 `json:"evictions"`
+}
+
+type ClusterStatsIndicesIdCache struct {
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes"`
+}
+
+type ClusterStatsIndicesCompletion struct {
+ Size string `json:"size"` // e.g. "61.3kb"
+ SizeInBytes int64 `json:"size_in_bytes"`
+ Fields map[string]struct {
+ Size string `json:"size"` // e.g. "61.3kb"
+ SizeInBytes int64 `json:"size_in_bytes"`
+ } `json:"fields"`
+}
+
+type ClusterStatsIndicesSegments struct {
+ Count int64 `json:"count"`
+ Memory string `json:"memory"` // e.g. "61.3kb"
+ MemoryInBytes int64 `json:"memory_in_bytes"`
+ IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb"
+ IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"`
+ IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb"
+ IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"`
+ VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb"
+ VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"`
+ FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb"
+ FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
+}
+
+type ClusterStatsIndicesPercolate struct {
+ Total int64 `json:"total"`
+ // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems
+ Time string `json:"get_time"` // e.g. "1s"
+ TimeInMillis int64 `json:"time_in_millis"`
+ Current int64 `json:"current"`
+ MemorySize string `json:"memory_size"` // e.g. "61.3kb"
+ MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"` // sic: matches the misspelled field name emitted by ES 1.x
+ Queries int64 `json:"queries"`
+}
+
+// ---
+
+type ClusterStatsNodes struct {
+ Count *ClusterStatsNodesCounts `json:"counts"`
+ Versions []string `json:"versions"`
+ OS *ClusterStatsNodesOsStats `json:"os"`
+ Process *ClusterStatsNodesProcessStats `json:"process"`
+ JVM *ClusterStatsNodesJvmStats `json:"jvm"`
+ FS *ClusterStatsNodesFsStats `json:"fs"`
+ Plugins []*ClusterStatsNodesPlugin `json:"plugins"`
+}
+
+type ClusterStatsNodesCounts struct {
+ Total int `json:"total"`
+ MasterOnly int `json:"master_only"`
+ DataOnly int `json:"data_only"`
+ MasterData int `json:"master_data"`
+ Client int `json:"client"`
+}
+
+type ClusterStatsNodesOsStats struct {
+ AvailableProcessors int `json:"available_processors"`
+ Mem *ClusterStatsNodesOsStatsMem `json:"mem"`
+ CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"`
+}
+
+type ClusterStatsNodesOsStatsMem struct {
+ Total string `json:"total"` // e.g. "16gb"
+ TotalInBytes int64 `json:"total_in_bytes"`
+}
+
+type ClusterStatsNodesOsStatsCPU struct {
+ Vendor string `json:"vendor"`
+ Model string `json:"model"`
+ MHz int `json:"mhz"`
+ TotalCores int `json:"total_cores"`
+ TotalSockets int `json:"total_sockets"`
+ CoresPerSocket int `json:"cores_per_socket"`
+ CacheSize string `json:"cache_size"` // e.g. "256b"
+ CacheSizeInBytes int64 `json:"cache_size_in_bytes"`
+ Count int `json:"count"`
+}
+
+type ClusterStatsNodesProcessStats struct {
+ CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"`
+ OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"`
+}
+
+type ClusterStatsNodesProcessStatsCPU struct {
+ Percent float64 `json:"percent"`
+}
+
+type ClusterStatsNodesProcessStatsOpenFileDescriptors struct {
+ Min int64 `json:"min"`
+ Max int64 `json:"max"`
+ Avg int64 `json:"avg"`
+}
+
+type ClusterStatsNodesJvmStats struct {
+ MaxUptime string `json:"max_uptime"` // e.g. "5h"
+ MaxUptimeInMillis int64 `json:"max_uptime_in_millis"`
+ Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"`
+ Mem *ClusterStatsNodesJvmStatsMem `json:"mem"`
+ Threads int64 `json:"threads"`
+}
+
+type ClusterStatsNodesJvmStatsVersion struct {
+ Version string `json:"version"` // e.g. "1.8.0_45"
+ VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+ VMVersion string `json:"vm_version"` // e.g. "25.45-b02"
+ VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
+ Count int `json:"count"`
+}
+
+type ClusterStatsNodesJvmStatsMem struct {
+ HeapUsed string `json:"heap_used"`
+ HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
+ HeapMax string `json:"heap_max"`
+ HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
+}
+
+type ClusterStatsNodesFsStats struct {
+ Path string `json:"path"`
+ Mount string `json:"mount"`
+ Dev string `json:"dev"`
+ Total string `json:"total"` // e.g. "930.7gb"
+ TotalInBytes int64 `json:"total_in_bytes"`
+ Free string `json:"free"` // e.g. "930.7gb"
+ FreeInBytes int64 `json:"free_in_bytes"`
+ Available string `json:"available"` // e.g. "930.7gb"
+ AvailableInBytes int64 `json:"available_in_bytes"`
+ DiskReads int64 `json:"disk_reads"`
+ DiskWrites int64 `json:"disk_writes"`
+ DiskIOOp int64 `json:"disk_io_op"`
+ DiskReadSize string `json:"disk_read_size"` // e.g. "0b"
+ DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"`
+ DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"
+ DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"`
+ DiskIOSize string `json:"disk_io_size"` // e.g. "0b"
+ DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"`
+ DiskQueue string `json:"disk_queue"`
+ DiskServiceTime string `json:"disk_service_time"`
+}
+
+type ClusterStatsNodesPlugin struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Description string `json:"description"`
+ URL string `json:"url"`
+ JVM bool `json:"jvm"`
+ Site bool `json:"site"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/connection.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/connection.go
new file mode 100644
index 0000000..b8b5bf8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/connection.go
@@ -0,0 +1,90 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// conn represents a single connection to a node in a cluster.
+type conn struct {
+ sync.RWMutex
+ nodeID string // node ID
+ url string
+ failures int
+ dead bool
+ deadSince *time.Time
+}
+
+// newConn creates a new connection to the given URL.
+func newConn(nodeID, url string) *conn {
+ c := &conn{
+ nodeID: nodeID,
+ url: url,
+ }
+ return c
+}
+
+// String returns a representation of the connection status.
+func (c *conn) String() string {
+ c.RLock()
+ defer c.RUnlock()
+ return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
+}
+
+// NodeID returns the ID of the node of this connection.
+func (c *conn) NodeID() string {
+ c.RLock()
+ defer c.RUnlock()
+ return c.nodeID
+}
+
+// URL returns the URL of this connection.
+func (c *conn) URL() string {
+ c.RLock()
+ defer c.RUnlock()
+ return c.url
+}
+
+// IsDead returns true if this connection is marked as dead, i.e. a previous
+// request to the URL has been unsuccessful.
+func (c *conn) IsDead() bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.dead
+}
+
+// MarkAsDead marks this connection as dead, increments the failure
+// counter, and records the time at which the connection went dead.
+func (c *conn) MarkAsDead() {
+ c.Lock()
+ c.dead = true
+ if c.deadSince == nil {
+ utcNow := time.Now().UTC()
+ c.deadSince = &utcNow
+ }
+ c.failures++
+ c.Unlock()
+}
+
+// MarkAsAlive marks this connection as eligible to be returned from the
+// pool of connections by the selector.
+func (c *conn) MarkAsAlive() {
+ c.Lock()
+ c.dead = false
+ c.Unlock()
+}
+
+// MarkAsHealthy marks this connection as healthy, i.e. a request has been
+// successfully performed with it.
+func (c *conn) MarkAsHealthy() {
+ c.Lock()
+ c.dead = false
+ c.deadSince = nil
+ c.failures = 0
+ c.Unlock()
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count.go
new file mode 100644
index 0000000..bb4c0ac
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/count.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// CountService is a convenient service for determining the
+// number of documents in an index. Use SearchService with
+// a SearchType of count for counting with queries etc.
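+//
+// A minimal usage sketch (assuming a *Client named client that exposes this
+// service as client.Count(...); the index and query are illustrative):
+//
+// q := elastic.NewTermQuery("user", "olivere")
+// n, err := client.Count("twitter").Query(&q).Do()
+// if err != nil {
+// // Handle error
+// }
+// fmt.Printf("found %d documents\n", n)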
+type CountService struct {
+ client *Client
+ indices []string
+ types []string
+ query Query
+ pretty bool
+}
+
+// CountResult is the result returned from using the Count API
+// (http://www.elasticsearch.org/guide/reference/api/count/)
+type CountResult struct {
+ Count int64 `json:"count"`
+ Shards shardsInfo `json:"_shards,omitempty"`
+}
+
+func NewCountService(client *Client) *CountService {
+ builder := &CountService{
+ client: client,
+ }
+ return builder
+}
+
+func (s *CountService) Index(index string) *CountService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *CountService) Indices(indices ...string) *CountService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *CountService) Type(typ string) *CountService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+func (s *CountService) Types(types ...string) *CountService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+func (s *CountService) Query(query Query) *CountService {
+ s.query = query
+ return s
+}
+
+func (s *CountService) Pretty(pretty bool) *CountService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *CountService) Do() (int64, error) {
+ var err error
+
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err = uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return 0, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types part
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err = uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return 0, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_count"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Set body if there is a query specified
+ var body interface{}
+ if s.query != nil {
+ query := make(map[string]interface{})
+ query["query"] = s.query.Source()
+ body = query
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return 0, err
+ }
+
+ // Return result
+ ret := new(CountResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return 0, err
+ }
+ return ret.Count, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/create_index.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/create_index.go
new file mode 100644
index 0000000..28cb6fe
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/create_index.go
@@ -0,0 +1,126 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// CreateIndexService creates a new index.
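+//
+// A minimal usage sketch (assuming a *Client named client; client.CreateIndex
+// is used the same way in the cluster test above, and the body is illustrative):
+//
+// res, err := client.CreateIndex("twitter").
+// BodyString(`{"settings":{"number_of_shards":1}}`).
+// Do()
+// if err != nil {
+// // Handle error
+// }
+// if !res.Acknowledged {
+// // Not acknowledged
+// }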
+type CreateIndexService struct {
+ client *Client
+ pretty bool
+ index string
+ timeout string
+ masterTimeout string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewCreateIndexService returns a new CreateIndexService.
+func NewCreateIndexService(client *Client) *CreateIndexService {
+ return &CreateIndexService{client: client}
+}
+
+// Index is the name of the index to create.
+func (b *CreateIndexService) Index(index string) *CreateIndexService {
+ b.index = index
+ return b
+}
+
+// Timeout sets the explicit operation timeout, e.g. "5s".
+func (s *CreateIndexService) Timeout(timeout string) *CreateIndexService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *CreateIndexService) MasterTimeout(masterTimeout string) *CreateIndexService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Body specifies the configuration of the index as a string.
+// It is an alias for BodyString.
+func (b *CreateIndexService) Body(body string) *CreateIndexService {
+ b.bodyString = body
+ return b
+}
+
+// BodyString specifies the configuration of the index as a string.
+func (b *CreateIndexService) BodyString(body string) *CreateIndexService {
+ b.bodyString = body
+ return b
+}
+
+// BodyJson specifies the configuration of the index. The interface{} will
+// be serialized as a JSON document, so use e.g. a map[string]interface{}.
+func (b *CreateIndexService) BodyJson(body interface{}) *CreateIndexService {
+ b.bodyJson = body
+ return b
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (b *CreateIndexService) Pretty(pretty bool) *CreateIndexService {
+ b.pretty = pretty
+ return b
+}
+
+// Do executes the operation.
+func (b *CreateIndexService) Do() (*CreateIndexResult, error) {
+ if b.index == "" {
+ return nil, errors.New("missing index name")
+ }
+
+ // Build url
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ "index": b.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "1")
+ }
+ if b.masterTimeout != "" {
+ params.Set("master_timeout", b.masterTimeout)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if b.bodyJson != nil {
+ body = b.bodyJson
+ } else {
+ body = b.bodyString
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("PUT", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ ret := new(CreateIndexResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a create index request.
+
+// CreateIndexResult is the outcome of creating a new index.
+type CreateIndexResult struct {
+ Acknowledged bool `json:"acknowledged"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder.go
new file mode 100644
index 0000000..765a5be
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/decoder.go
@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+)
+
+// Decoder is used to decode responses from Elasticsearch.
+// Users of elastic can implement their own decoder for advanced purposes
+// and set it per Client (see SetDecoder). If none is specified,
+// DefaultDecoder is used.
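+//
+// A sketch of a custom decoder that preserves number precision via
+// json.Number (assumes the bytes and encoding/json packages, and is wired
+// in through SetDecoder as mentioned above):
+//
+// type numberDecoder struct{}
+//
+// func (d numberDecoder) Decode(data []byte, v interface{}) error {
+// dec := json.NewDecoder(bytes.NewReader(data))
+// dec.UseNumber()
+// return dec.Decode(v)
+// }
+//
+// client, err := elastic.NewClient(elastic.SetDecoder(numberDecoder{}))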
+type Decoder interface {
+ Decode(data []byte, v interface{}) error
+}
+
+// DefaultDecoder uses json.Unmarshal from the Go standard library
+// to decode JSON data.
+type DefaultDecoder struct{}
+
+// Decode decodes with json.Unmarshal from the Go standard library.
+func (u *DefaultDecoder) Decode(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete.go
new file mode 100644
index 0000000..e6f8870
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete.go
@@ -0,0 +1,130 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type DeleteService struct {
+ client *Client
+ index string
+ _type string
+ id string
+ routing string
+ refresh *bool
+ version *int
+ pretty bool
+}
+
+func NewDeleteService(client *Client) *DeleteService {
+ builder := &DeleteService{
+ client: client,
+ }
+ return builder
+}
+
+func (s *DeleteService) Index(index string) *DeleteService {
+ s.index = index
+ return s
+}
+
+func (s *DeleteService) Type(_type string) *DeleteService {
+ s._type = _type
+ return s
+}
+
+func (s *DeleteService) Id(id string) *DeleteService {
+ s.id = id
+ return s
+}
+
+func (s *DeleteService) Parent(parent string) *DeleteService {
+ if s.routing == "" {
+ s.routing = parent
+ }
+ return s
+}
+
+func (s *DeleteService) Refresh(refresh bool) *DeleteService {
+ s.refresh = &refresh
+ return s
+}
+
+func (s *DeleteService) Version(version int) *DeleteService {
+ s.version = &version
+ return s
+}
+
+func (s *DeleteService) Pretty(pretty bool) *DeleteService {
+ s.pretty = pretty
+ return s
+}
+
+// Do deletes the document. It fails if any of index, type, and identifier
+// are missing.
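+//
+// A minimal usage sketch (assuming a *Client named client that exposes this
+// service as client.Delete()):
+//
+// res, err := client.Delete().Index("twitter").Type("tweet").Id("1").Do()
+// if err != nil {
+// // Handle error
+// }
+// if res.Found {
+// // Document was deleted
+// }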
+func (s *DeleteService) Do() (*DeleteResult, error) {
+ if s.index == "" {
+ return nil, ErrMissingIndex
+ }
+ if s._type == "" {
+ return nil, ErrMissingType
+ }
+ if s.id == "" {
+ return nil, ErrMissingId
+ }
+
+ // Build url
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "index": s.index,
+ "type": s._type,
+ "id": s.id,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Parameters
+ params := make(url.Values)
+ if s.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *s.version))
+ }
+ if s.routing != "" {
+ params.Set("routing", fmt.Sprintf("%s", s.routing))
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("DELETE", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return response
+ ret := new(DeleteResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a delete request.
+
+type DeleteResult struct {
+ Found bool `json:"found"`
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Version int64 `json:"_version"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query.go
new file mode 100644
index 0000000..0628241
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_by_query.go
@@ -0,0 +1,292 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// DeleteByQueryService deletes documents that match a query.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html.
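+//
+// A minimal usage sketch (assuming a *Client named client that exposes this
+// service as client.DeleteByQuery(); index, type, and query are illustrative):
+//
+// q := elastic.NewTermQuery("user", "olivere")
+// res, err := client.DeleteByQuery().Index("twitter").Type("tweet").Query(&q).Do()
+// if err != nil {
+// // Handle error
+// }
+// _ = res // per-index shard info is available in res.Indices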
+type DeleteByQueryService struct {
+ client *Client
+ indices []string
+ types []string
+ analyzer string
+ consistency string
+ defaultOper string
+ df string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ replication string
+ routing string
+ timeout string
+ pretty bool
+ q string
+ query Query
+}
+
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+ builder := &DeleteByQueryService{
+ client: client,
+ }
+ return builder
+}
+
+// Index limits the delete-by-query to a single index.
+// You can use _all to perform the operation on all indices.
+func (s *DeleteByQueryService) Index(index string) *DeleteByQueryService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Indices(indices ...string) *DeleteByQueryService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Type limits the delete operation to the given type.
+func (s *DeleteByQueryService) Type(typ string) *DeleteByQueryService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+// Types limits the delete operation to the given types.
+func (s *DeleteByQueryService) Types(types ...string) *DeleteByQueryService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Analyzer to use for the query string.
+func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
+ s.analyzer = analyzer
+ return s
+}
+
+// Consistency represents the specific write consistency setting for the operation.
+// It can be one, quorum, or all.
+func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService {
+ s.consistency = consistency
+ return s
+}
+
+// DefaultOperator for query string query (AND or OR).
+func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
+ s.defaultOper = defaultOperator
+ return s
+}
+
+// DF is the field to use as default where no field prefix is given in the query string.
+func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
+ s.df = defaultField
+ return s
+}
+
+// DefaultField is the field to use as default where no field prefix is given in the query string.
+// It is an alias to the DF func.
+func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
+ s.df = defaultField
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
+ s.ignoreUnavailable = &ignore
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices (including the _all string
+// or when no indices have been specified).
+func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
+ s.allowNoIndices = &allow
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both. It can be "open" or "closed".
+func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
+ s.expandWildcards = expand
+ return s
+}
+
+// Replication sets a specific replication type (sync or async).
+func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService {
+ s.replication = replication
+ return s
+}
+
+// Q specifies the query in Lucene query string syntax. You can also use
+// Query to programmatically specify the query.
+func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
+ s.q = query
+ return s
+}
+
+// QueryString is an alias to Q. Notice that you can also use Query to
+// programmatically set the query.
+func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
+ s.q = query
+ return s
+}
+
+// Routing sets a specific routing value.
+func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService {
+ s.routing = routing
+ return s
+}
+
+// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms".
+func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
+ s.timeout = timeout
+ return s
+}
+
+// Pretty indents the JSON output from Elasticsearch.
+func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
+ s.pretty = pretty
+ return s
+}
+
+// Query sets the query programmatically.
+func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
+ s.query = query
+ return s
+}
+
+// Do executes the delete-by-query operation.
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
+ var err error
+
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err = uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types part
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err = uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_query"
+
+ // Parameters
+ params := make(url.Values)
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.consistency != "" {
+ params.Set("consistency", s.consistency)
+ }
+ if s.defaultOper != "" {
+ params.Set("default_operator", s.defaultOper)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.replication != "" {
+ params.Set("replication", s.replication)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+
+ // Set body if there is a query set
+ var body interface{}
+ if s.query != nil {
+ query := make(map[string]interface{})
+ query["query"] = s.query.Source()
+ body = query
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("DELETE", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(DeleteByQueryResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService.
+type DeleteByQueryResult struct {
+ Indices map[string]IndexDeleteByQueryResult `json:"_indices"`
+}
+
+// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
+// index.
+type IndexDeleteByQueryResult struct {
+ Shards shardsInfo `json:"_shards"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_index.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_index.go
new file mode 100644
index 0000000..57d08b4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_index.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type DeleteIndexService struct {
+ client *Client
+ index string
+}
+
+func NewDeleteIndexService(client *Client) *DeleteIndexService {
+ builder := &DeleteIndexService{
+ client: client,
+ }
+ return builder
+}
+
+func (b *DeleteIndexService) Index(index string) *DeleteIndexService {
+ b.index = index
+ return b
+}
+
+func (b *DeleteIndexService) Do() (*DeleteIndexResult, error) {
+ // Build url
+ path, err := uritemplates.Expand("/{index}/", map[string]string{
+ "index": b.index,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("DELETE", path, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(DeleteIndexResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a delete index request.
+
+type DeleteIndexResult struct {
+ Acknowledged bool `json:"acknowledged"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping.go
new file mode 100644
index 0000000..20bc6f5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_mapping.go
@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// DeleteMappingService allows deleting a mapping along with its data.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-mapping.html.
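+//
+// A minimal usage sketch (assuming a *Client named client that exposes this
+// service as client.DeleteMapping()):
+//
+// res, err := client.DeleteMapping().Index("twitter").Type("tweet").Do()
+// if err != nil {
+// // Handle error
+// }
+// if !res.Acknowledged {
+// // Not acknowledged
+// }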
+type DeleteMappingService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ masterTimeout string
+}
+
+// NewDeleteMappingService creates a new DeleteMappingService.
+func NewDeleteMappingService(client *Client) *DeleteMappingService {
+ return &DeleteMappingService{
+ client: client,
+ index: make([]string, 0),
+ typ: make([]string, 0),
+ }
+}
+
+// Index is a list of index names (supports wildcards). Use `_all` for all indices.
+func (s *DeleteMappingService) Index(index ...string) *DeleteMappingService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type is a list of document types to delete (supports wildcards).
+// Use `_all` to delete all document types in the specified indices.
+func (s *DeleteMappingService) Type(typ ...string) *DeleteMappingService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// MasterTimeout specifies the timeout for connecting to master.
+func (s *DeleteMappingService) MasterTimeout(masterTimeout string) *DeleteMappingService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *DeleteMappingService) Pretty(pretty bool) *DeleteMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteMappingService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteMappingService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(s.typ) == 0 {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *DeleteMappingService) Do() (*DeleteMappingResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("DELETE", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(DeleteMappingResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// DeleteMappingResponse is the response of DeleteMappingService.Do.
+type DeleteMappingResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_template.go
new file mode 100644
index 0000000..cfbe057
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/delete_template.go
@@ -0,0 +1,118 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// DeleteTemplateService deletes a search template. More information can
+// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
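+//
+// A minimal usage sketch (assuming a *Client named client that exposes this
+// service as client.DeleteTemplate(); the template ID is illustrative):
+//
+// res, err := client.DeleteTemplate().Id("my-search-template").Do()
+// if err != nil {
+// // Handle error
+// }
+// if res.Found {
+// // Template was deleted
+// }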
+type DeleteTemplateService struct {
+ client *Client
+ pretty bool
+ id string
+ version *int
+ versionType string
+}
+
+// NewDeleteTemplateService creates a new DeleteTemplateService.
+func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
+ return &DeleteTemplateService{
+ client: client,
+ }
+}
+
+// Id is the template ID.
+func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
+ s.id = id
+ return s
+}
+
+// Version sets an explicit version number for concurrency control.
+func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
+ s.version = &version
+ return s
+}
+
+// VersionType specifies a version type.
+func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
+ s.versionType = versionType
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteTemplateService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("DELETE", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(DeleteTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
+type DeleteTemplateResponse struct {
+ Found bool `json:"found"`
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Version int `json:"_version"`
+}
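+
+// A minimal usage sketch (an assumption for illustration: the Client wires
+// this service up as client.DeleteTemplate()):
+//
+//   res, err := client.DeleteTemplate().Id("my-search-template").Do()
+//   if err == nil && res.Found {
+//       // The template existed and was deleted.
+//   }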
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/doc.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/doc.go
new file mode 100644
index 0000000..336a734
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/doc.go
@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+/*
+Package elastic provides an interface to the Elasticsearch server
+(http://www.elasticsearch.org/).
+
+The first thing you do is to create a Client. If you have Elasticsearch
+installed and running with its default settings
+(i.e. available at http://127.0.0.1:9200), all you need to do is:
+
+ client, err := elastic.NewClient()
+ if err != nil {
+ // Handle error
+ }
+
+If your Elasticsearch server is running on a different IP and/or port,
+just provide a URL to NewClient:
+
+ // Create a client and connect to http://192.168.2.10:9201
+ client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
+ if err != nil {
+ // Handle error
+ }
+
+You can pass many more configuration parameters to NewClient. Review the
+documentation of NewClient for more information.
+
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return ErrNoClient.
+
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a Do function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+
+ exists, err := client.IndexExists("twitter").Do()
+ if err != nil {
+ // Handle error
+ }
+ if !exists {
+ // Index does not exist yet.
+ }
+
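+As a further sketch, here is how you could fetch a document by ID with the
+Get service (assuming it is wired up on the Client as client.Get(), like
+the other services):
+
+ res, err := client.Get().Index("twitter").Type("tweet").Id("1").Do()
+ if err != nil {
+ // Handle error
+ }
+ if res.Found {
+ // res.Source holds the document as raw JSON.
+ }
+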
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the Do function of a service.
+Also see the wiki on GitHub for more details.
+
+*/
+package elastic
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors.go
new file mode 100644
index 0000000..abbb09c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/errors.go
@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
+var (
+ // ErrMissingIndex is returned e.g. from DeleteService if the index is missing.
+ ErrMissingIndex = errors.New("elastic: index is missing")
+
+ // ErrMissingType is returned e.g. from DeleteService if the type is missing.
+ ErrMissingType = errors.New("elastic: type is missing")
+
+ // ErrMissingId is returned e.g. from DeleteService if the document identifier is missing.
+ ErrMissingId = errors.New("elastic: id is missing")
+)
+
+func checkResponse(res *http.Response) error {
+ // 200-299 and 404 are valid status codes
+ if (res.StatusCode >= 200 && res.StatusCode <= 299) || res.StatusCode == http.StatusNotFound {
+ return nil
+ }
+ if res.Body == nil {
+ return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("elastic: Error %d (%s) when reading body: %v", res.StatusCode, http.StatusText(res.StatusCode), err)
+ }
+ errReply := new(Error)
+ err = json.Unmarshal(slurp, errReply)
+ if err != nil {
+ return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ }
+ if errReply != nil {
+ if errReply.Status == 0 {
+ errReply.Status = res.StatusCode
+ }
+ return errReply
+ }
+ return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+}
+
+type Error struct {
+ Status int `json:"status"`
+ Message string `json:"error"`
+}
+
+func (e *Error) Error() string {
+ if e.Message != "" {
+ return fmt.Sprintf("elastic: Error %d (%s): %s", e.Status, http.StatusText(e.Status), e.Message)
+ } else {
+ return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
+ }
+}
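+
+// A minimal sketch of inspecting a failed call. It assumes err came back
+// from a service's Do method, which surfaces the *Error built by
+// checkResponse above:
+//
+//   if e, ok := err.(*Error); ok {
+//       fmt.Printf("elastic: status %d: %s\n", e.Status, e.Message)
+//   }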
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists.go
new file mode 100644
index 0000000..534ad5d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/exists.go
@@ -0,0 +1,176 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ExistsService checks if a document exists.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
+// for details.
+type ExistsService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ parent string
+ preference string
+ realtime *bool
+ refresh *bool
+ routing string
+}
+
+// NewExistsService creates a new ExistsService.
+func NewExistsService(client *Client) *ExistsService {
+ return &ExistsService{
+ client: client,
+ }
+}
+
+// Id is the document ID.
+func (s *ExistsService) Id(id string) *ExistsService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *ExistsService) Index(index string) *ExistsService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document (use `_all` to fetch the first
+// document matching the ID across all types).
+func (s *ExistsService) Type(typ string) *ExistsService {
+ s.typ = typ
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *ExistsService) Parent(parent string) *ExistsService {
+ s.parent = parent
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: random).
+func (s *ExistsService) Preference(preference string) *ExistsService {
+ s.preference = preference
+ return s
+}
+
+// Realtime specifies whether to perform the operation in realtime or search mode.
+func (s *ExistsService) Realtime(realtime bool) *ExistsService {
+ s.realtime = &realtime
+ return s
+}
+
+// Refresh the shard containing the document before performing the operation.
+func (s *ExistsService) Refresh(refresh bool) *ExistsService {
+ s.refresh = &refresh
+ return s
+}
+
+// Routing is the specific routing value.
+func (s *ExistsService) Routing(routing string) *ExistsService {
+ s.routing = routing
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *ExistsService) Pretty(pretty bool) *ExistsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ExistsService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.realtime != nil {
+ params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
+ }
+ if s.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ExistsService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *ExistsService) Do() (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("HEAD", path, params, nil)
+ if err != nil {
+ return false, err
+ }
+
+ // Evaluate operation response
+ switch res.StatusCode {
+ case http.StatusOK:
+ return true, nil
+ case http.StatusNotFound:
+ return false, nil
+ default:
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+ }
+}
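+
+// A minimal usage sketch (an assumption for illustration: the Client wires
+// this service up as client.Exists()):
+//
+//   found, err := client.Exists().Index("twitter").Type("tweet").Id("1").Do()
+//   if err != nil {
+//       // Handle error
+//   }
+//   if !found {
+//       // Document does not exist.
+//   }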
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain.go
new file mode 100644
index 0000000..b6b9648
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/explain.go
@@ -0,0 +1,329 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// ExplainService computes a score explanation for a query and
+// a specific document.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
+type ExplainService struct {
+ client *Client
+ pretty bool
+ id string
+ index string
+ typ string
+ q string
+ routing string
+ lenient *bool
+ analyzer string
+ df string
+ fields []string
+ lowercaseExpandedTerms *bool
+ xSourceInclude []string
+ analyzeWildcard *bool
+ parent string
+ preference string
+ xSource []string
+ defaultOperator string
+ xSourceExclude []string
+ source string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewExplainService creates a new ExplainService.
+func NewExplainService(client *Client) *ExplainService {
+ return &ExplainService{
+ client: client,
+ xSource: make([]string, 0),
+ xSourceExclude: make([]string, 0),
+ fields: make([]string, 0),
+ xSourceInclude: make([]string, 0),
+ }
+}
+
+// Id is the document ID.
+func (s *ExplainService) Id(id string) *ExplainService {
+ s.id = id
+ return s
+}
+
+// Index is the name of the index.
+func (s *ExplainService) Index(index string) *ExplainService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document.
+func (s *ExplainService) Type(typ string) *ExplainService {
+ s.typ = typ
+ return s
+}
+
+// Source is the URL-encoded query definition (instead of using the request body).
+func (s *ExplainService) Source(source string) *ExplainService {
+ s.source = source
+ return s
+}
+
+// XSourceExclude is a list of fields to exclude from the returned _source field.
+func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
+ s.xSourceExclude = make([]string, 0)
+ s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+ return s
+}
+
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *ExplainService) Lenient(lenient bool) *ExplainService {
+ s.lenient = &lenient
+ return s
+}
+
+// Q sets the query in the Lucene query string syntax.
+func (s *ExplainService) Q(q string) *ExplainService {
+ s.q = q
+ return s
+}
+
+// Routing sets a specific routing value.
+func (s *ExplainService) Routing(routing string) *ExplainService {
+ s.routing = routing
+ return s
+}
+
+// AnalyzeWildcard specifies whether wildcards and prefix queries
+// in the query string query should be analyzed (default: false).
+func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
+ s.analyzeWildcard = &analyzeWildcard
+ return s
+}
+
+// Analyzer is the analyzer for the query string query.
+func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
+ s.analyzer = analyzer
+ return s
+}
+
+// Df is the default field for query string query (default: _all).
+func (s *ExplainService) Df(df string) *ExplainService {
+ s.df = df
+ return s
+}
+
+// Fields is a list of fields to return in the response.
+func (s *ExplainService) Fields(fields ...string) *ExplainService {
+ s.fields = make([]string, 0)
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
+ s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return s
+}
+
+// XSourceInclude is a list of fields to extract and return from the _source field.
+func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
+ s.xSourceInclude = make([]string, 0)
+ s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+ return s
+}
+
+// DefaultOperator is the default operator for query string query (AND or OR).
+func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
+ s.defaultOperator = defaultOperator
+ return s
+}
+
+// Parent is the ID of the parent document.
+func (s *ExplainService) Parent(parent string) *ExplainService {
+ s.parent = parent
+ return s
+}
+
+// Preference specifies the node or shard the operation should be performed on (default: random).
+func (s *ExplainService) Preference(preference string) *ExplainService {
+ s.preference = preference
+ return s
+}
+
+// XSource accepts true or false to control whether the _source field is returned, or a list of fields to return.
+func (s *ExplainService) XSource(xSource ...string) *ExplainService {
+ s.xSource = make([]string, 0)
+ s.xSource = append(s.xSource, xSource...)
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *ExplainService) Pretty(pretty bool) *ExplainService {
+ s.pretty = pretty
+ return s
+}
+
+// Query sets a query definition using the Query DSL.
+func (s *ExplainService) Query(query Query) *ExplainService {
+ body := make(map[string]interface{})
+ body["query"] = query.Source()
+ s.bodyJson = body
+ return s
+}
+
+// BodyJson sets the query definition using the Query DSL.
+func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString sets the query definition using the Query DSL as a string.
+func (s *ExplainService) BodyString(body string) *ExplainService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ExplainService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
+ "id": s.id,
+ "index": s.index,
+ "type": s.typ,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if len(s.xSource) > 0 {
+ params.Set("_source", strings.Join(s.xSource, ","))
+ }
+ if s.defaultOperator != "" {
+ params.Set("default_operator", s.defaultOperator)
+ }
+ if s.parent != "" {
+ params.Set("parent", s.parent)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.source != "" {
+ params.Set("source", s.source)
+ }
+ if len(s.xSourceExclude) > 0 {
+ params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+ }
+ if s.lenient != nil {
+ params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+ }
+ if s.q != "" {
+ params.Set("q", s.q)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ if s.lowercaseExpandedTerms != nil {
+ params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+ }
+ if len(s.xSourceInclude) > 0 {
+ params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+ }
+ if s.analyzeWildcard != nil {
+ params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+ }
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ }
+ if s.df != "" {
+ params.Set("df", s.df)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ExplainService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *ExplainService) Do() (*ExplainResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(ExplainResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// ExplainResponse is the response of ExplainService.Do.
+type ExplainResponse struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Matched bool `json:"matched"`
+ Explanation map[string]interface{} `json:"explanation"`
+}
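+
+// A minimal usage sketch (assumptions for illustration: the Client wires
+// this service up as client.Explain(), and a term query constructor such
+// as NewTermQuery is available elsewhere in this package):
+//
+//   expl, err := client.Explain().Index("twitter").Type("tweet").Id("1").
+//       Query(NewTermQuery("user", "olivere")).Do()
+//   if err == nil && expl.Matched {
+//       // expl.Explanation describes how the score was computed.
+//   }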
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context.go
new file mode 100644
index 0000000..6c9b91b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/fetch_source_context.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "net/url"
+ "strings"
+)
+
+// FetchSourceContext controls whether and how the _source field is returned.
+type FetchSourceContext struct {
+ fetchSource bool
+ transformSource bool
+ includes []string
+ excludes []string
+}
+
+func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
+ return &FetchSourceContext{
+ fetchSource: fetchSource,
+ includes: make([]string, 0),
+ excludes: make([]string, 0),
+ }
+}
+
+func (fsc *FetchSourceContext) FetchSource() bool {
+ return fsc.fetchSource
+}
+
+func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
+ fsc.fetchSource = fetchSource
+}
+
+func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
+ fsc.includes = append(fsc.includes, includes...)
+ return fsc
+}
+
+func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
+ fsc.excludes = append(fsc.excludes, excludes...)
+ return fsc
+}
+
+func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
+ fsc.transformSource = transformSource
+ return fsc
+}
+
+func (fsc *FetchSourceContext) Source() interface{} {
+ if !fsc.fetchSource {
+ return false
+ }
+ return map[string]interface{}{
+ "includes": fsc.includes,
+ "excludes": fsc.excludes,
+ }
+}
+
+// Query returns the parameters in a form suitable for a URL query string.
+func (fsc *FetchSourceContext) Query() url.Values {
+ params := url.Values{}
+ if !fsc.fetchSource {
+ params.Add("_source", "false")
+ return params
+ }
+ if len(fsc.includes) > 0 {
+ params.Add("_source_include", strings.Join(fsc.includes, ","))
+ }
+ if len(fsc.excludes) > 0 {
+ params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
+ }
+ return params
+}
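+
+// A minimal sketch of restricting _source on a get request (assuming the
+// GetService in this package, wired up on the Client as client.Get()):
+//
+//   fsc := NewFetchSourceContext(true).Include("user", "message")
+//   res, err := client.Get().Index("twitter").Type("tweet").Id("1").
+//       FetchSourceContext(fsc).Do()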
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/filter.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/filter.go
new file mode 100644
index 0000000..ba1f012
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/filter.go
@@ -0,0 +1,9 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filter is the interface implemented by all filters.
+// Source returns the JSON-serializable representation of the filter.
+type Filter interface {
+ Source() interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush.go
new file mode 100644
index 0000000..c723371
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/flush.go
@@ -0,0 +1,167 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// FlushService allows flushing one or more indices. The flush process of an
+// index frees memory by flushing data to the index storage and clearing the
+// internal transaction log.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
+type FlushService struct {
+ client *Client
+
+ indices []string
+ force *bool
+ full *bool
+ waitIfOngoing *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewFlushService creates a new FlushService.
+func NewFlushService(client *Client) *FlushService {
+ builder := &FlushService{
+ client: client,
+ }
+ return builder
+}
+
+// Index adds a single index to the list of indices to flush.
+func (s *FlushService) Index(index string) *FlushService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices adds a list of indices to flush.
+func (s *FlushService) Indices(indices ...string) *FlushService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Force specifies whether to force a flush even if it is not necessary.
+func (s *FlushService) Force(force bool) *FlushService {
+ s.force = &force
+ return s
+}
+
+// Full, when set to true, creates a new index writer for the index and
+// refreshes all settings related to the index.
+func (s *FlushService) Full(full bool) *FlushService {
+ s.full = &full
+ return s
+}
+
+// WaitIfOngoing will block until the flush can be executed (if set to true)
+// if another flush operation is already executing. The default is false
+// and will cause an exception to be thrown on the shard level if another
+// flush operation is already running. [1.4.0.Beta1]
+func (s *FlushService) WaitIfOngoing(wait bool) *FlushService {
+ s.waitIfOngoing = &wait
+ return s
+}
+
+// IgnoreUnavailable specifies whether concrete indices should be ignored
+// when unavailable (e.g. missing or closed).
+func (s *FlushService) IgnoreUnavailable(ignoreUnavailable bool) *FlushService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices specifies whether to ignore if a wildcard expression
+// yields no indices. This includes the _all index or when no indices
+// have been specified.
+func (s *FlushService) AllowNoIndices(allowNoIndices bool) *FlushService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards specifies whether to expand wildcards to concrete indices
+// that are open, closed, or both. Use one of "open", "closed", "none", or "all".
+func (s *FlushService) ExpandWildcards(expandWildcards string) *FlushService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Do executes the service.
+func (s *FlushService) Do() (*FlushResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ if len(s.indices) > 0 {
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",") + "/"
+ }
+ path += "_flush"
+
+ // Parameters
+ params := make(url.Values)
+ if s.force != nil {
+ params.Set("force", fmt.Sprintf("%v", *s.force))
+ }
+ if s.full != nil {
+ params.Set("full", fmt.Sprintf("%v", *s.full))
+ }
+ if s.waitIfOngoing != nil {
+ params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(FlushResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a flush request.
+
+type shardsInfo struct {
+ Total int `json:"total"`
+ Successful int `json:"successful"`
+ Failed int `json:"failed"`
+}
+
+type FlushResult struct {
+ Shards shardsInfo `json:"_shards"`
+}
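+
+// A minimal usage sketch (an assumption for illustration: the Client wires
+// this service up as client.Flush()):
+//
+//   res, err := client.Flush().Index("twitter").WaitIfOngoing(true).Do()
+//   if err == nil {
+//       // res.Shards reports how many shards were flushed successfully.
+//   }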
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point.go
new file mode 100644
index 0000000..4f55955
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/geo_point.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// GeoPoint is a geographic position described via latitude and longitude.
+type GeoPoint struct {
+ Lat, Lon float64
+}
+
+// Source returns the object to be serialized in Elasticsearch DSL.
+func (pt *GeoPoint) Source() map[string]float64 {
+ return map[string]float64{
+ "lat": pt.Lat,
+ "lon": pt.Lon,
+ }
+}
+
+// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
+func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
+ return &GeoPoint{Lat: lat, Lon: lon}
+}
+
+// GeoPointFromString initializes a new GeoPoint by a string that is
+// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
+func GeoPointFromString(latLon string) (*GeoPoint, error) {
+ latlon := strings.SplitN(latLon, ",", 2)
+ if len(latlon) != 2 {
+ return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
+ }
+ lat, err := strconv.ParseFloat(latlon[0], 64)
+ if err != nil {
+ return nil, err
+ }
+ lon, err := strconv.ParseFloat(latlon[1], 64)
+ if err != nil {
+ return nil, err
+ }
+ return &GeoPoint{Lat: lat, Lon: lon}, nil
+}
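+
+// For example, both of these construct the same point:
+//
+//   pt1 := GeoPointFromLatLon(40.10210, -70.12091)
+//   pt2, err := GeoPointFromString("40.10210,-70.12091")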
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get.go
new file mode 100644
index 0000000..94cde57
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get.go
@@ -0,0 +1,223 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// GetService retrieves a single document by ID from Elasticsearch.
+type GetService struct {
+ client *Client
+ index string
+ typ string
+ id string
+ routing string
+ preference string
+ fields []string
+ refresh *bool
+ realtime *bool
+ fsc *FetchSourceContext
+ versionType string
+ version *int64
+ ignoreErrorsOnGeneratedFields *bool
+}
+
+// NewGetService creates a new GetService. The document type defaults to "_all".
+func NewGetService(client *Client) *GetService {
+ builder := &GetService{
+ client: client,
+ typ: "_all",
+ }
+ return builder
+}
+
+func (b *GetService) String() string {
+ return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
+ b.index,
+ b.typ,
+ b.id,
+ b.routing)
+}
+
+func (b *GetService) Index(index string) *GetService {
+ b.index = index
+ return b
+}
+
+func (b *GetService) Type(typ string) *GetService {
+ b.typ = typ
+ return b
+}
+
+func (b *GetService) Id(id string) *GetService {
+ b.id = id
+ return b
+}
+
+func (b *GetService) Parent(parent string) *GetService {
+ if b.routing == "" {
+ b.routing = parent
+ }
+ return b
+}
+
+func (b *GetService) Routing(routing string) *GetService {
+ b.routing = routing
+ return b
+}
+
+func (b *GetService) Preference(preference string) *GetService {
+ b.preference = preference
+ return b
+}
+
+func (b *GetService) Fields(fields ...string) *GetService {
+ if b.fields == nil {
+ b.fields = make([]string, 0)
+ }
+ b.fields = append(b.fields, fields...)
+ return b
+}
+
+func (s *GetService) FetchSource(fetchSource bool) *GetService {
+ if s.fsc == nil {
+ s.fsc = NewFetchSourceContext(fetchSource)
+ } else {
+ s.fsc.SetFetchSource(fetchSource)
+ }
+ return s
+}
+
+func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
+ s.fsc = fetchSourceContext
+ return s
+}
+
+func (b *GetService) Refresh(refresh bool) *GetService {
+ b.refresh = &refresh
+ return b
+}
+
+func (b *GetService) Realtime(realtime bool) *GetService {
+ b.realtime = &realtime
+ return b
+}
+
+func (b *GetService) VersionType(versionType string) *GetService {
+ b.versionType = versionType
+ return b
+}
+
+func (b *GetService) Version(version int64) *GetService {
+ b.version = &version
+ return b
+}
+
+func (b *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
+ b.ignoreErrorsOnGeneratedFields = &ignore
+ return b
+}
+
+// Validate checks if the operation is valid.
+func (s *GetService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+func (b *GetService) Do() (*GetResult, error) {
+ // Check pre-conditions
+ if err := b.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Build url
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "index": b.index,
+ "type": b.typ,
+ "id": b.id,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ params := make(url.Values)
+ if b.realtime != nil {
+ params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+ }
+ if len(b.fields) > 0 {
+ params.Add("fields", strings.Join(b.fields, ","))
+ }
+ if b.routing != "" {
+ params.Add("routing", b.routing)
+ }
+ if b.preference != "" {
+ params.Add("preference", b.preference)
+ }
+ if b.refresh != nil {
+ params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+ }
+ if b.ignoreErrorsOnGeneratedFields != nil {
+ params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *b.ignoreErrorsOnGeneratedFields))
+ }
+ if b.version != nil {
+ params.Add("version", fmt.Sprintf("%d", *b.version))
+ }
+ if b.versionType != "" {
+ params.Add("version_type", b.versionType)
+ }
+ if b.fsc != nil {
+ for k, values := range b.fsc.Query() {
+ params.Add(k, strings.Join(values, ","))
+ }
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(GetResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a get request.
+
+type GetResult struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Version int64 `json:"_version,omitempty"`
+ Source *json.RawMessage `json:"_source,omitempty"`
+ Found bool `json:"found,omitempty"`
+ Fields map[string]interface{} `json:"fields,omitempty"`
+ Error string `json:"error,omitempty"` // used only in MultiGet
+}
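+
+// A minimal usage sketch (an assumption for illustration: the Client wires
+// this service up as client.Get()):
+//
+//   res, err := client.Get().Index("twitter").Type("tweet").Id("1").Do()
+//   if err != nil {
+//       // Handle error
+//   }
+//   if res.Found && res.Source != nil {
+//       var tweet map[string]interface{}
+//       err = json.Unmarshal(*res.Source, &tweet)
+//   }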
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping.go
new file mode 100644
index 0000000..13ad343
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_mapping.go
@@ -0,0 +1,172 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// GetMappingService retrieves the mapping definitions for an index or
+// index/type. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-mapping.html.
+type GetMappingService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+}
+
+// NewGetMappingService creates a new GetMappingService.
+func NewGetMappingService(client *Client) *GetMappingService {
+ return &GetMappingService{
+ client: client,
+ index: make([]string, 0),
+ typ: make([]string, 0),
+ }
+}
+
+// Index is a list of index names.
+func (s *GetMappingService) Index(index ...string) *GetMappingService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type is a list of document types.
+func (s *GetMappingService) Type(typ ...string) *GetMappingService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes the `_all` string or when no indices have been specified.
+func (s *GetMappingService) AllowNoIndices(allowNoIndices bool) *GetMappingService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both.
+func (s *GetMappingService) ExpandWildcards(expandWildcards string) *GetMappingService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local indicates whether to return local information rather than
+// retrieving the state from the master node (default: false).
+func (s *GetMappingService) Local(local bool) *GetMappingService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *GetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *GetMappingService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *GetMappingService) Pretty(pretty bool) *GetMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *GetMappingService) buildURL() (string, url.Values, error) {
+ var index, typ []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.typ) > 0 {
+ typ = s.typ
+ } else {
+ typ = []string{"_all"}
+ }
+
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(index, ","),
+ "type": strings.Join(typ, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *GetMappingService) Validate() error {
+ return nil
+}
+
+// Do executes the operation. When successful, it returns a map of mapping
+// definitions. If you specify an index that does not exist, Elasticsearch
+// returns HTTP status 404; if you specify a type that does not exist,
+// Elasticsearch returns an empty map.
+func (s *GetMappingService) Do() (map[string]interface{}, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]interface{}
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
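+
+// A minimal usage sketch (an assumption for illustration: the Client wires
+// this service up as client.GetMapping()):
+//
+//   mappings, err := client.GetMapping().Index("twitter").Do()
+//   if err == nil {
+//       // mappings is the JSON response, keyed by index name.
+//   }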
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template.go
new file mode 100644
index 0000000..ef9d561
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/get_template.go
@@ -0,0 +1,113 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// GetTemplateService reads a search template.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type GetTemplateService struct {
+ client *Client
+ pretty bool
+ id string
+ version interface{}
+ versionType string
+}
+
+// NewGetTemplateService creates a new GetTemplateService.
+func NewGetTemplateService(client *Client) *GetTemplateService {
+ return &GetTemplateService{
+ client: client,
+ }
+}
+
+// Id is the template ID.
+func (s *GetTemplateService) Id(id string) *GetTemplateService {
+ s.id = id
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
+ s.version = version
+ return s
+}
+
+// VersionType is a specific version type.
+func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
+ s.versionType = versionType
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *GetTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *GetTemplateService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation and returns the template.
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(GetTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type GetTemplateResponse struct {
+ Template string `json:"template"`
+}
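+
+// A minimal usage sketch (an assumption for illustration: the Client wires
+// this service up as client.GetTemplate()):
+//
+//   res, err := client.GetTemplate().Id("my-search-template").Do()
+//   if err == nil {
+//       // res.Template holds the stored search template source.
+//   }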
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight.go
new file mode 100644
index 0000000..dab8c45
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/highlight.go
@@ -0,0 +1,496 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Highlight allows highlighting search results on one or more fields.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+type Highlight struct {
+ fields []*HighlighterField
+ tagsSchema *string
+ highlightFilter *bool
+ fragmentSize *int
+ numOfFragments *int
+ preTags []string
+ postTags []string
+ order *string
+ encoder *string
+ requireFieldMatch *bool
+ boundaryMaxScan *int
+ boundaryChars []rune
+ highlighterType *string
+ fragmenter *string
+ highlightQuery Query
+ noMatchSize *int
+ phraseLimit *int
+ options map[string]interface{}
+ forceSource *bool
+ useExplicitFieldOrder bool
+}
+
+// NewHighlight creates a new Highlight.
+func NewHighlight() *Highlight {
+ hl := &Highlight{
+ fields: make([]*HighlighterField, 0),
+ preTags: make([]string, 0),
+ postTags: make([]string, 0),
+ boundaryChars: make([]rune, 0),
+ options: make(map[string]interface{}),
+ }
+ return hl
+}
+
+func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
+ hl.fields = append(hl.fields, fields...)
+ return hl
+}
+
+func (hl *Highlight) Field(name string) *Highlight {
+ field := NewHighlighterField(name)
+ hl.fields = append(hl.fields, field)
+ return hl
+}
+
+func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
+ hl.tagsSchema = &schemaName
+ return hl
+}
+
+func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
+ hl.highlightFilter = &highlightFilter
+ return hl
+}
+
+func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
+ hl.fragmentSize = &fragmentSize
+ return hl
+}
+
+func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
+ hl.numOfFragments = &numOfFragments
+ return hl
+}
+
+func (hl *Highlight) Encoder(encoder string) *Highlight {
+ hl.encoder = &encoder
+ return hl
+}
+
+func (hl *Highlight) PreTags(preTags ...string) *Highlight {
+ hl.preTags = make([]string, 0)
+ hl.preTags = append(hl.preTags, preTags...)
+ return hl
+}
+
+func (hl *Highlight) PostTags(postTags ...string) *Highlight {
+ hl.postTags = make([]string, 0)
+ hl.postTags = append(hl.postTags, postTags...)
+ return hl
+}
+
+func (hl *Highlight) Order(order string) *Highlight {
+ hl.order = &order
+ return hl
+}
+
+func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
+ hl.requireFieldMatch = &requireFieldMatch
+ return hl
+}
+
+func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
+ hl.boundaryMaxScan = &boundaryMaxScan
+ return hl
+}
+
+func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
+ hl.boundaryChars = make([]rune, 0)
+ hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
+ return hl
+}
+
+func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
+ hl.highlighterType = &highlighterType
+ return hl
+}
+
+func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
+ hl.fragmenter = &fragmenter
+ return hl
+}
+
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+ hl.highlightQuery = highlightQuery
+ return hl
+}
+
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+ hl.noMatchSize = &noMatchSize
+ return hl
+}
+
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+ hl.options = options
+ return hl
+}
+
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+ hl.forceSource = &forceSource
+ return hl
+}
+
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+ hl.useExplicitFieldOrder = useExplicitFieldOrder
+ return hl
+}
+
+// Source creates the JSON for the "highlight" section of a search request.
+func (hl *Highlight) Source() interface{} {
+ // Returns the map inside of "highlight":
+ // "highlight":{
+ // ... this ...
+ // }
+ source := make(map[string]interface{})
+ if hl.tagsSchema != nil {
+ source["tags_schema"] = *hl.tagsSchema
+ }
+ if hl.preTags != nil && len(hl.preTags) > 0 {
+ source["pre_tags"] = hl.preTags
+ }
+ if hl.postTags != nil && len(hl.postTags) > 0 {
+ source["post_tags"] = hl.postTags
+ }
+ if hl.order != nil {
+ source["order"] = *hl.order
+ }
+ if hl.highlightFilter != nil {
+ source["highlight_filter"] = *hl.highlightFilter
+ }
+ if hl.fragmentSize != nil {
+ source["fragment_size"] = *hl.fragmentSize
+ }
+ if hl.numOfFragments != nil {
+ source["number_of_fragments"] = *hl.numOfFragments
+ }
+ if hl.encoder != nil {
+ source["encoder"] = *hl.encoder
+ }
+ if hl.requireFieldMatch != nil {
+ source["require_field_match"] = *hl.requireFieldMatch
+ }
+ if hl.boundaryMaxScan != nil {
+ source["boundary_max_scan"] = *hl.boundaryMaxScan
+ }
+ if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 {
+ source["boundary_chars"] = hl.boundaryChars
+ }
+ if hl.highlighterType != nil {
+ source["type"] = *hl.highlighterType
+ }
+ if hl.fragmenter != nil {
+ source["fragmenter"] = *hl.fragmenter
+ }
+ if hl.highlightQuery != nil {
+ source["highlight_query"] = hl.highlightQuery.Source()
+ }
+ if hl.noMatchSize != nil {
+ source["no_match_size"] = *hl.noMatchSize
+ }
+ if hl.phraseLimit != nil {
+ source["phrase_limit"] = *hl.phraseLimit
+ }
+ if hl.options != nil && len(hl.options) > 0 {
+ source["options"] = hl.options
+ }
+ if hl.forceSource != nil {
+ source["force_source"] = *hl.forceSource
+ }
+
+ if hl.fields != nil && len(hl.fields) > 0 {
+ if hl.useExplicitFieldOrder {
+ // Use a slice for the fields
+ fields := make([]map[string]interface{}, 0)
+ for _, field := range hl.fields {
+ fmap := make(map[string]interface{})
+ fmap[field.Name] = field.Source()
+ fields = append(fields, fmap)
+ }
+ source["fields"] = fields
+ } else {
+ // Use a map for the fields
+ fields := make(map[string]interface{})
+ for _, field := range hl.fields {
+ fields[field.Name] = field.Source()
+ }
+ source["fields"] = fields
+ }
+ }
+
+ return source
+}
+
+// HighlighterField specifies a highlighted field.
+type HighlighterField struct {
+ Name string
+
+ preTags []string
+ postTags []string
+ fragmentSize int
+ fragmentOffset int
+ numOfFragments int
+ highlightFilter *bool
+ order *string
+ requireFieldMatch *bool
+ boundaryMaxScan int
+ boundaryChars []rune
+ highlighterType *string
+ fragmenter *string
+ highlightQuery Query
+ noMatchSize *int
+ matchedFields []string
+ phraseLimit *int
+ options map[string]interface{}
+ forceSource *bool
+}
+
+func NewHighlighterField(name string) *HighlighterField {
+ return &HighlighterField{
+ Name: name,
+ preTags: make([]string, 0),
+ postTags: make([]string, 0),
+ fragmentSize: -1,
+ fragmentOffset: -1,
+ numOfFragments: -1,
+ boundaryMaxScan: -1,
+ boundaryChars: make([]rune, 0),
+ matchedFields: make([]string, 0),
+ options: make(map[string]interface{}),
+ }
+}
+
+func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
+ f.preTags = make([]string, 0)
+ f.preTags = append(f.preTags, preTags...)
+ return f
+}
+
+func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
+ f.postTags = make([]string, 0)
+ f.postTags = append(f.postTags, postTags...)
+ return f
+}
+
+func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
+ f.fragmentSize = fragmentSize
+ return f
+}
+
+func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
+ f.fragmentOffset = fragmentOffset
+ return f
+}
+
+func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
+ f.numOfFragments = numOfFragments
+ return f
+}
+
+func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
+ f.highlightFilter = &highlightFilter
+ return f
+}
+
+func (f *HighlighterField) Order(order string) *HighlighterField {
+ f.order = &order
+ return f
+}
+
+func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
+ f.requireFieldMatch = &requireFieldMatch
+ return f
+}
+
+func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
+ f.boundaryMaxScan = boundaryMaxScan
+ return f
+}
+
+func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
+ f.boundaryChars = make([]rune, 0)
+ f.boundaryChars = append(f.boundaryChars, boundaryChars...)
+ return f
+}
+
+func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
+ f.highlighterType = &highlighterType
+ return f
+}
+
+func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
+ f.fragmenter = &fragmenter
+ return f
+}
+
+func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
+ f.highlightQuery = highlightQuery
+ return f
+}
+
+func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
+ f.noMatchSize = &noMatchSize
+ return f
+}
+
+func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
+ f.options = options
+ return f
+}
+
+func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
+ f.matchedFields = make([]string, 0)
+ f.matchedFields = append(f.matchedFields, matchedFields...)
+ return f
+}
+
+func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
+ f.phraseLimit = &phraseLimit
+ return f
+}
+
+func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
+ f.forceSource = &forceSource
+ return f
+}
+
+// Source creates the JSON for this field in the "highlight" section.
+func (f *HighlighterField) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if f.preTags != nil && len(f.preTags) > 0 {
+ source["pre_tags"] = f.preTags
+ }
+ if f.postTags != nil && len(f.postTags) > 0 {
+ source["post_tags"] = f.postTags
+ }
+ if f.fragmentSize != -1 {
+ source["fragment_size"] = f.fragmentSize
+ }
+ if f.numOfFragments != -1 {
+ source["number_of_fragments"] = f.numOfFragments
+ }
+ if f.fragmentOffset != -1 {
+ source["fragment_offset"] = f.fragmentOffset
+ }
+ if f.highlightFilter != nil {
+ source["highlight_filter"] = *f.highlightFilter
+ }
+ if f.order != nil {
+ source["order"] = *f.order
+ }
+ if f.requireFieldMatch != nil {
+ source["require_field_match"] = *f.requireFieldMatch
+ }
+ if f.boundaryMaxScan != -1 {
+ source["boundary_max_scan"] = f.boundaryMaxScan
+ }
+ if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
+ source["boundary_chars"] = f.boundaryChars
+ }
+ if f.highlighterType != nil {
+ source["type"] = *f.highlighterType
+ }
+ if f.fragmenter != nil {
+ source["fragmenter"] = *f.fragmenter
+ }
+ if f.highlightQuery != nil {
+ source["highlight_query"] = f.highlightQuery.Source()
+ }
+ if f.noMatchSize != nil {
+ source["no_match_size"] = *f.noMatchSize
+ }
+ if f.matchedFields != nil && len(f.matchedFields) > 0 {
+ source["matched_fields"] = f.matchedFields
+ }
+ if f.phraseLimit != nil {
+ source["phrase_limit"] = *f.phraseLimit
+ }
+ if f.options != nil && len(f.options) > 0 {
+ source["options"] = f.options
+ }
+ if f.forceSource != nil {
+ source["force_source"] = *f.forceSource
+ }
+
+ return source
+}
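+
+// A minimal sketch of attaching highlighting to a search (assumptions for
+// illustration: the SearchService elsewhere in this package accepts it via
+// Highlight(), and the Client wires it up as client.Search()):
+//
+//   hl := NewHighlight().Field("message").PreTags("<em>").PostTags("</em>")
+//   res, err := client.Search().Index("twitter").
+//       Query(NewTermQuery("user", "olivere")).Highlight(hl).Do()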
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index.go
new file mode 100644
index 0000000..4262ecb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index.go
@@ -0,0 +1,217 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndexResult is the result of indexing a document in Elasticsearch.
+type IndexResult struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Version int `json:"_version"`
+ Created bool `json:"created"`
+}
+
+// IndexService adds documents to Elasticsearch.
+type IndexService struct {
+ client *Client
+ index string
+ _type string
+ id string
+ routing string
+ parent string
+ opType string
+ refresh *bool
+ version *int64
+ versionType string
+ timestamp string
+ ttl string
+ timeout string
+ bodyString string
+ bodyJson interface{}
+ pretty bool
+}
+
+// NewIndexService creates a new IndexService.
+func NewIndexService(client *Client) *IndexService {
+ builder := &IndexService{
+ client: client,
+ }
+ return builder
+}
+
+func (b *IndexService) Index(name string) *IndexService {
+ b.index = name
+ return b
+}
+
+func (b *IndexService) Type(_type string) *IndexService {
+ b._type = _type
+ return b
+}
+
+func (b *IndexService) Id(id string) *IndexService {
+ b.id = id
+ return b
+}
+
+func (b *IndexService) Routing(routing string) *IndexService {
+ b.routing = routing
+ return b
+}
+
+func (b *IndexService) Parent(parent string) *IndexService {
+ b.parent = parent
+ return b
+}
+
+// OpType is either "create" or "index" (the default).
+func (b *IndexService) OpType(opType string) *IndexService {
+ b.opType = opType
+ return b
+}
+
+func (b *IndexService) Refresh(refresh bool) *IndexService {
+ b.refresh = &refresh
+ return b
+}
+
+func (b *IndexService) Version(version int64) *IndexService {
+ b.version = &version
+ return b
+}
+
+// VersionType is either "internal" (default), "external",
+// "external_gt", "external_gte", or "force".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+// for details.
+func (b *IndexService) VersionType(versionType string) *IndexService {
+ b.versionType = versionType
+ return b
+}
+
+func (b *IndexService) Timestamp(timestamp string) *IndexService {
+ b.timestamp = timestamp
+ return b
+}
+
+func (b *IndexService) TTL(ttl string) *IndexService {
+ b.ttl = ttl
+ return b
+}
+
+func (b *IndexService) Timeout(timeout string) *IndexService {
+ b.timeout = timeout
+ return b
+}
+
+func (b *IndexService) BodyString(body string) *IndexService {
+ b.bodyString = body
+ return b
+}
+
+func (b *IndexService) BodyJson(json interface{}) *IndexService {
+ b.bodyJson = json
+ return b
+}
+
+func (b *IndexService) Pretty(pretty bool) *IndexService {
+ b.pretty = pretty
+ return b
+}
+
+func (b *IndexService) Do() (*IndexResult, error) {
+ // Build url
+ var path, method string
+ if b.id != "" {
+ // Create document with manual id
+ method = "PUT"
+ path = "/{index}/{type}/{id}"
+ } else {
+ // Automatic ID generation
+ // See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+ method = "POST"
+ path = "/{index}/{type}/"
+ }
+ path, err := uritemplates.Expand(path, map[string]string{
+ "index": b.index,
+ "type": b._type,
+ "id": b.id,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Parameters
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "true")
+ }
+ if b.routing != "" {
+ params.Set("routing", b.routing)
+ }
+ if b.parent != "" {
+ params.Set("parent", b.parent)
+ }
+ if b.opType != "" {
+ params.Set("op_type", b.opType)
+ }
+ if b.refresh != nil && *b.refresh {
+ params.Set("refresh", "true")
+ }
+ if b.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *b.version))
+ }
+ if b.versionType != "" {
+ params.Set("version_type", b.versionType)
+ }
+ if b.timestamp != "" {
+ params.Set("timestamp", b.timestamp)
+ }
+ if b.ttl != "" {
+ params.Set("ttl", b.ttl)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+
+ // Body
+ var body interface{}
+ if b.bodyJson != nil {
+ body = b.bodyJson
+ } else {
+ body = b.bodyString
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest(method, path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(IndexResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
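+
+// exampleIndexDocument is an illustrative sketch, not part of the upstream
+// library: it shows how the IndexService builder is typically chained. The
+// index name "tweets", type "tweet", id "1", and document body are
+// assumptions made for this example only.
+func exampleIndexDocument(client *Client) (*IndexResult, error) {
+ doc := map[string]interface{}{"user": "olivere", "message": "Hello, world"}
+ return NewIndexService(client).
+ Index("tweets").
+ Type("tweet").
+ Id("1").
+ BodyJson(doc).
+ Refresh(true). // make the document immediately visible to search
+ Do()
+}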
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_close.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_close.go
new file mode 100644
index 0000000..7b0481c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_close.go
@@ -0,0 +1,145 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// CloseIndexService closes an index.
+// See documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type CloseIndexService struct {
+ client *Client
+ pretty bool
+ index string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ timeout string
+ masterTimeout string
+}
+
+// NewCloseIndexService creates a new CloseIndexService.
+func NewCloseIndexService(client *Client) *CloseIndexService {
+ return &CloseIndexService{client: client}
+}
+
+// Index is the name of the index.
+func (s *CloseIndexService) Index(index string) *CloseIndexService {
+ s.index = index
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *CloseIndexService) Timeout(timeout string) *CloseIndexService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *CloseIndexService) MasterTimeout(masterTimeout string) *CloseIndexService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *CloseIndexService) IgnoreUnavailable(ignoreUnavailable bool) *CloseIndexService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices
+// expression that resolves into no concrete indices. (This includes the
+// `_all` string and the case where no indices have been specified.)
+func (s *CloseIndexService) AllowNoIndices(allowNoIndices bool) *CloseIndexService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *CloseIndexService) ExpandWildcards(expandWildcards string) *CloseIndexService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *CloseIndexService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_close", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *CloseIndexService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *CloseIndexService) Do() (*CloseIndexResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(CloseIndexResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// CloseIndexResponse is the response of CloseIndexService.Do.
+type CloseIndexResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+}
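+
+// exampleCloseIndex is an illustrative sketch, not part of the upstream
+// library: it closes the assumed index "tweets" and verifies that the
+// cluster acknowledged the operation.
+func exampleCloseIndex(client *Client) error {
+ res, err := NewCloseIndexService(client).Index("tweets").Do()
+ if err != nil {
+ return err
+ }
+ if !res.Acknowledged {
+ return fmt.Errorf("close of index %q not acknowledged", "tweets")
+ }
+ return nil
+}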
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_exists.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_exists.go
new file mode 100644
index 0000000..fcf4ada
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_exists.go
@@ -0,0 +1,50 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndexExistsService checks whether an index exists in Elasticsearch.
+type IndexExistsService struct {
+ client *Client
+ index string
+}
+
+func NewIndexExistsService(client *Client) *IndexExistsService {
+ builder := &IndexExistsService{
+ client: client,
+ }
+ return builder
+}
+
+func (b *IndexExistsService) Index(index string) *IndexExistsService {
+ b.index = index
+ return b
+}
+
+func (b *IndexExistsService) Do() (bool, error) {
+ // Build url
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ "index": b.index,
+ })
+ if err != nil {
+ return false, err
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("HEAD", path, nil, nil)
+ if err != nil {
+ return false, err
+ }
+ if res.StatusCode == 200 {
+ return true, nil
+ } else if res.StatusCode == 404 {
+ return false, nil
+ }
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
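+
+// exampleEnsureIndex is an illustrative sketch, not part of the upstream
+// library: it uses IndexExistsService to branch on whether the assumed
+// index "tweets" is already present.
+func exampleEnsureIndex(client *Client) (bool, error) {
+ exists, err := NewIndexExistsService(client).Index("tweets").Do()
+ if err != nil {
+ return false, err
+ }
+ // A caller would typically create the index here when exists is false.
+ return exists, nil
+}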
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get.go
new file mode 100644
index 0000000..89aecb6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get.go
@@ -0,0 +1,186 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesGetService retrieves information about one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-index.html.
+type IndicesGetService struct {
+ client *Client
+ pretty bool
+ index []string
+ feature []string
+ expandWildcards string
+ local *bool
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+}
+
+// NewIndicesGetService creates a new IndicesGetService.
+func NewIndicesGetService(client *Client) *IndicesGetService {
+ return &IndicesGetService{
+ client: client,
+ index: make([]string, 0),
+ feature: make([]string, 0),
+ }
+}
+
+// Index is a list of index names. Use _all to retrieve information about
+// all indices of a cluster.
+func (s *IndicesGetService) Index(index ...string) *IndicesGetService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Feature is a list of features (e.g. _settings, _mappings, _warmers, and _aliases).
+func (s *IndicesGetService) Feature(feature ...string) *IndicesGetService {
+ s.feature = append(s.feature, feature...)
+ return s
+}
+
+// ExpandWildcards indicates whether wildcard expressions should
+// get expanded to open or closed indices (default: open).
+func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetService) Local(local bool) *IndicesGetService {
+ s.local = &local
+ return s
+}
+
+// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
+func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard expression that
+// resolves to no concrete indices (default: false).
+func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ var index []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.feature) > 0 {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
+ "index": strings.Join(index, ","),
+ "feature": strings.Join(s.feature, ","),
+ })
+ } else {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}", map[string]string{
+ "index": strings.Join(index, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetResponse is part of the response of IndicesGetService.Do.
+type IndicesGetResponse struct {
+ Aliases map[string]interface{} `json:"aliases"`
+ Mappings map[string]interface{} `json:"mappings"`
+ Settings map[string]interface{} `json:"settings"`
+ Warmers map[string]interface{} `json:"warmers"`
+}
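+
+// exampleGetIndexMappings is an illustrative sketch, not part of the
+// upstream library: it restricts IndicesGetService to the _mappings feature
+// of the assumed index "tweets" and returns the mappings of that index.
+func exampleGetIndexMappings(client *Client) (map[string]interface{}, error) {
+ res, err := NewIndicesGetService(client).
+ Index("tweets").
+ Feature("_mappings").
+ Do()
+ if err != nil {
+ return nil, err
+ }
+ info, found := res["tweets"]
+ if !found {
+ return nil, fmt.Errorf("no information returned for index %q", "tweets")
+ }
+ return info.Mappings, nil
+}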
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings.go
new file mode 100644
index 0000000..f498e81
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_get_settings.go
@@ -0,0 +1,189 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesGetSettingsService retrieves the settings of one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-get-settings.html.
+type IndicesGetSettingsService struct {
+ client *Client
+ pretty bool
+ index []string
+ name []string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+ return &IndicesGetSettingsService{
+ client: client,
+ index: make([]string, 0),
+ name: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` or empty string to perform the operation on all indices.
+func (s *IndicesGetSettingsService) Index(index ...string) *IndicesGetSettingsService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Name specifies the names of the settings that should be included.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices
+// expression that resolves into no concrete indices.
+// (This includes the `_all` string and the case where no indices have been specified.)
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ var index []string
+
+ if len(s.index) > 0 {
+ index = s.index
+ } else {
+ index = []string{"_all"}
+ }
+
+ if len(s.name) > 0 {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+ "index": strings.Join(index, ","),
+ "name": strings.Join(s.name, ","),
+ })
+ } else {
+ // Build URL
+ path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+ "index": strings.Join(index, ","),
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetSettingsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetSettingsResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
+type IndicesGetSettingsResponse struct {
+ Settings map[string]interface{} `json:"settings"`
+}
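+
+// exampleIndexSettings is an illustrative sketch, not part of the upstream
+// library: it reads the settings of the assumed index "tweets". The
+// setting-name filter "index.number_of_*" is an assumption; wildcards are
+// expected to be supported by the get-settings API.
+func exampleIndexSettings(client *Client) (map[string]interface{}, error) {
+ res, err := NewIndicesGetSettingsService(client).
+ Index("tweets").
+ Name("index.number_of_*").
+ Do()
+ if err != nil {
+ return nil, err
+ }
+ info, found := res["tweets"]
+ if !found {
+ return nil, fmt.Errorf("no settings returned for index %q", "tweets")
+ }
+ return info.Settings, nil
+}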
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_open.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_open.go
new file mode 100644
index 0000000..e93e50e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/index_open.go
@@ -0,0 +1,146 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// OpenIndexService opens an index.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type OpenIndexService struct {
+ client *Client
+ pretty bool
+ index string
+ expandWildcards string
+ timeout string
+ masterTimeout string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+}
+
+// NewOpenIndexService creates a new OpenIndexService.
+func NewOpenIndexService(client *Client) *OpenIndexService {
+ return &OpenIndexService{client: client}
+}
+
+// Index is the name of the index to open.
+func (s *OpenIndexService) Index(index string) *OpenIndexService {
+ s.index = index
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *OpenIndexService) Timeout(timeout string) *OpenIndexService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *OpenIndexService) MasterTimeout(masterTimeout string) *OpenIndexService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *OpenIndexService) IgnoreUnavailable(ignoreUnavailable bool) *OpenIndexService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices
+// expression that resolves into no concrete indices.
+// (This includes the `_all` string and the case where no indices have been specified.)
+func (s *OpenIndexService) AllowNoIndices(allowNoIndices bool) *OpenIndexService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *OpenIndexService) ExpandWildcards(expandWildcards string) *OpenIndexService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *OpenIndexService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/_open", map[string]string{
+ "index": s.index,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *OpenIndexService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *OpenIndexService) Do() (*OpenIndexResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(OpenIndexResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// OpenIndexResponse is the response of OpenIndexService.Do.
+type OpenIndexResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+}
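+
+// exampleReopenIndex is an illustrative sketch, not part of the upstream
+// library: it reopens the assumed index "tweets", giving the master an
+// assumed 10s to acknowledge the operation.
+func exampleReopenIndex(client *Client) error {
+ res, err := NewOpenIndexService(client).
+ Index("tweets").
+ Timeout("10s").
+ MasterTimeout("10s").
+ Do()
+ if err != nil {
+ return err
+ }
+ if !res.Acknowledged {
+ return fmt.Errorf("open of index %q not acknowledged", "tweets")
+ }
+ return nil
+}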
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_delete_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_delete_template.go
new file mode 100644
index 0000000..faaeb3a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_delete_template.go
@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesDeleteTemplateService deletes index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesDeleteTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ timeout string
+ masterTimeout string
+}
+
+// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
+func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
+ return &IndicesDeleteTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the template.
+func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
+ s.name = name
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesDeleteTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("DELETE", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesDeleteTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
+type IndicesDeleteTemplateResponse struct {
+ Acknowledged bool `json:"acknowledged,omitempty"`
+}
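+
+// exampleDeleteTemplate is an illustrative sketch, not part of the upstream
+// library: it deletes the assumed index template "tweets-template".
+func exampleDeleteTemplate(client *Client) error {
+ res, err := NewIndicesDeleteTemplateService(client).
+ Name("tweets-template").
+ Do()
+ if err != nil {
+ return err
+ }
+ if !res.Acknowledged {
+ return fmt.Errorf("deletion of template %q not acknowledged", "tweets-template")
+ }
+ return nil
+}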
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template.go
new file mode 100644
index 0000000..e96e9a1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_template.go
@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesExistsTemplateService checks if a given template exists.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists
+// for documentation.
+type IndicesExistsTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ local *bool
+}
+
+// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
+func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
+ return &IndicesExistsTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the template.
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
+ s.name = name
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTemplateService) Do() (bool, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return false, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("HEAD", path, params, nil)
+ if err != nil {
+ return false, err
+ }
+ if res.StatusCode == 200 {
+ return true, nil
+ } else if res.StatusCode == 404 {
+ return false, nil
+ }
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
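+
+// exampleTemplateExists is an illustrative sketch, not part of the upstream
+// library: it checks for the assumed template "tweets-template" against the
+// local cluster state rather than asking the master.
+func exampleTemplateExists(client *Client) (bool, error) {
+ return NewIndicesExistsTemplateService(client).
+ Name("tweets-template").
+ Local(true).
+ Do()
+}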
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type.go
new file mode 100644
index 0000000..257a2f0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_exists_type.go
@@ -0,0 +1,155 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesExistsTypeService checks if one or more types exist in one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-types-exists.html.
+type IndicesExistsTypeService struct {
+ client *Client
+ pretty bool
+ index []string
+ typ []string
+ allowNoIndices *bool
+ expandWildcards string
+ local *bool
+ ignoreUnavailable *bool
+}
+
+// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
+ return &IndicesExistsTypeService{
+ client: client,
+ index: make([]string, 0),
+ typ: make([]string, 0),
+ }
+}
+
+// Index is a list of index names; use `_all` to check the types across all indices.
+func (s *IndicesExistsTypeService) Index(index ...string) *IndicesExistsTypeService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type is a list of document types to check.
+func (s *IndicesExistsTypeService) Type(typ ...string) *IndicesExistsTypeService {
+ s.typ = append(s.typ, typ...)
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore a wildcard indices
+// expression that resolves into no concrete indices.
+// (This includes the `_all` string and the case where no indices have been specified.)
+func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// Local specifies whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
+ if err := s.Validate(); err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Build URL
+ path, err := uritemplates.Expand("/{index}/{type}", map[string]string{
+ "type": strings.Join(s.typ, ","),
+ "index": strings.Join(s.index, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTypeService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ }
+ if len(s.typ) == 0 {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTypeService) Do() (bool, error) {
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return false, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("HEAD", path, params, nil)
+ if err != nil {
+ return false, err
+ }
+
+ // Return operation response
+ if res.StatusCode == 200 {
+ return true, nil
+ } else if res.StatusCode == 404 {
+ return false, nil
+ }
+ return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
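+
+// exampleTypeExists is an illustrative sketch, not part of the upstream
+// library: it checks whether the assumed document type "tweet" exists in
+// the assumed index "tweets".
+func exampleTypeExists(client *Client) (bool, error) {
+ return NewIndicesExistsTypeService(client).
+ Index("tweets").
+ Type("tweet").
+ Do()
+}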
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template.go
new file mode 100644
index 0000000..1462ee3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_get_template.go
@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesGetTemplateService returns an index template.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesGetTemplateService struct {
+ client *Client
+ pretty bool
+ name []string
+ flatSettings *bool
+ local *bool
+}
+
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+ return &IndicesGetTemplateService{
+ client: client,
+ name: make([]string, 0),
+ }
+}
+
+// Name is the name of the index template.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+ s.name = append(s.name, name...)
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from the master node (default: false).
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
+ s.local = &local
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var err error
+ var path string
+ if len(s.name) > 0 {
+ path, err = uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": strings.Join(s.name, ","),
+ })
+ } else {
+ path = "/_template"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetTemplateService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ var ret map[string]*IndicesGetTemplateResponse
+ if err := json.Unmarshal(res.Body, &ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
+type IndicesGetTemplateResponse struct {
+ Order int `json:"order,omitempty"`
+ Template string `json:"template,omitempty"`
+ Settings map[string]interface{} `json:"settings,omitempty"`
+ Mappings map[string]interface{} `json:"mappings,omitempty"`
+ Aliases map[string]interface{} `json:"aliases,omitempty"`
+}
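+
+// exampleListTemplateNames is an illustrative sketch, not part of the
+// upstream library: calling the service without a name returns all
+// templates keyed by template name; here the names are collected.
+func exampleListTemplateNames(client *Client) ([]string, error) {
+ res, err := NewIndicesGetTemplateService(client).Do()
+ if err != nil {
+ return nil, err
+ }
+ names := make([]string, 0, len(res))
+ for name := range res {
+ names = append(names, name)
+ }
+ return names, nil
+}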
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_put_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_put_template.go
new file mode 100644
index 0000000..7a97240
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_put_template.go
@@ -0,0 +1,179 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesPutTemplateService creates or updates index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesPutTemplateService struct {
+ client *Client
+ pretty bool
+ name string
+ order interface{}
+ create *bool
+ timeout string
+ masterTimeout string
+ flatSettings *bool
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
+ return &IndicesPutTemplateService{
+ client: client,
+ }
+}
+
+// Name is the name of the index template.
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
+ s.name = name
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Order is the order for this template when merging multiple matching ones
+// (higher numbers are merged later, overriding the lower numbers).
+func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
+ s.order = order
+ return s
+}
+
+// Create indicates whether the index template should only be added if it is
+// new, or whether it may also replace an existing one.
+func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
+ s.create = &create
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson is the template definition, given as a JSON-serializable value.
+func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the template definition, given as a raw JSON string.
+func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.order != nil {
+ params.Set("order", fmt.Sprintf("%v", s.order))
+ }
+ if s.create != nil {
+ params.Set("create", fmt.Sprintf("%v", *s.create))
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesPutTemplateService) Validate() error {
+ var invalid []string
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("PUT", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesPutTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
+type IndicesPutTemplateResponse struct {
+ Acknowledged bool `json:"acknowledged,omitempty"`
+}
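+
+// examplePutTemplate is an illustrative sketch, not part of the upstream
+// library: it registers an assumed template "tweets-template" that applies
+// a single shard to every index matching "tweets-*", using the 1.x
+// template body format.
+func examplePutTemplate(client *Client) error {
+ tmpl := map[string]interface{}{
+ "template": "tweets-*",
+ "settings": map[string]interface{}{
+ "number_of_shards": 1,
+ },
+ }
+ res, err := NewIndicesPutTemplateService(client).
+ Name("tweets-template").
+ BodyJson(tmpl).
+ Create(false). // allow replacing an existing template of the same name
+ Do()
+ if err != nil {
+ return err
+ }
+ if !res.Acknowledged {
+ return fmt.Errorf("creation of template %q not acknowledged", "tweets-template")
+ }
+ return nil
+}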
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats.go
new file mode 100644
index 0000000..5f03378
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/indices_stats.go
@@ -0,0 +1,385 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// IndicesStatsService provides stats on various metrics of one or more
+// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html.
+type IndicesStatsService struct {
+ client *Client
+ pretty bool
+ metric []string
+ index []string
+ level string
+ types []string
+ completionFields []string
+ fielddataFields []string
+ fields []string
+ groups []string
+ human *bool
+}
+
+// NewIndicesStatsService creates a new IndicesStatsService.
+func NewIndicesStatsService(client *Client) *IndicesStatsService {
+ return &IndicesStatsService{
+ client: client,
+ index: make([]string, 0),
+ metric: make([]string, 0),
+ completionFields: make([]string, 0),
+ fielddataFields: make([]string, 0),
+ fields: make([]string, 0),
+ groups: make([]string, 0),
+ types: make([]string, 0),
+ }
+}
+
+// Metric limits the information returned to the specified metrics. Options are:
+// docs, store, indexing, get, search, completion, fielddata, flush, merge,
+// query_cache, refresh, suggest, and warmer.
+func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
+ s.metric = append(s.metric, metric...)
+ return s
+}
+
+// Index is the list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesStatsService) Index(index ...string) *IndicesStatsService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Level specifies whether stats are aggregated at the cluster, index or shard level.
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
+ s.level = level
+ return s
+}
+
+// Types is a list of document types for the `indexing` index metric.
+func (s *IndicesStatsService) Types(types ...string) *IndicesStatsService {
+ s.types = append(s.types, types...)
+ return s
+}
+
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
+ s.completionFields = append(s.completionFields, completionFields...)
+ return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
+ s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+ return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric
+// (supports wildcards).
+func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
+ s.fields = append(s.fields, fields...)
+ return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
+ s.groups = append(s.groups, groups...)
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human-readable.
+func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+ if len(s.index) > 0 && len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats", map[string]string{
+ "index": strings.Join(s.index, ","),
+ })
+ } else if len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{
+ "metric": strings.Join(s.metric, ","),
+ })
+ } else {
+ path = "/_stats"
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if len(s.groups) > 0 {
+ params.Set("groups", strings.Join(s.groups, ","))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.level != "" {
+ params.Set("level", s.level)
+ }
+ if len(s.types) > 0 {
+ params.Set("types", strings.Join(s.types, ","))
+ }
+ if len(s.completionFields) > 0 {
+ params.Set("completion_fields", strings.Join(s.completionFields, ","))
+ }
+ if len(s.fielddataFields) > 0 {
+ params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+ }
+ if len(s.fields) > 0 {
+ params.Set("fields", strings.Join(s.fields, ","))
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesStatsService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(IndicesStatsResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// IndicesStatsResponse is the response of IndicesStatsService.Do.
+type IndicesStatsResponse struct {
+ // Shards provides information returned from shards.
+ Shards shardsInfo `json:"_shards"`
+
+ // All provides summary stats about all indices.
+ All *IndexStats `json:"_all,omitempty"`
+
+ // Indices provides a map into the stats of an index. The key of the
+ // map is the index name.
+ Indices map[string]*IndexStats `json:"indices,omitempty"`
+}
+
+// IndexStats is index stats for a specific index.
+type IndexStats struct {
+ Primaries *IndexStatsDetails `json:"primaries,omitempty"`
+ Total *IndexStatsDetails `json:"total,omitempty"`
+}
+
+type IndexStatsDetails struct {
+ Docs *IndexStatsDocs `json:"docs,omitempty"`
+ Store *IndexStatsStore `json:"store,omitempty"`
+ Indexing *IndexStatsIndexing `json:"indexing,omitempty"`
+ Get *IndexStatsGet `json:"get,omitempty"`
+ Search *IndexStatsSearch `json:"search,omitempty"`
+ Merges *IndexStatsMerges `json:"merges,omitempty"`
+ Refresh *IndexStatsRefresh `json:"refresh,omitempty"`
+ Flush *IndexStatsFlush `json:"flush,omitempty"`
+ Warmer *IndexStatsWarmer `json:"warmer,omitempty"`
+ FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"`
+ IdCache *IndexStatsIdCache `json:"id_cache,omitempty"`
+ Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"`
+ Percolate *IndexStatsPercolate `json:"percolate,omitempty"`
+ Completion *IndexStatsCompletion `json:"completion,omitempty"`
+ Segments *IndexStatsSegments `json:"segments,omitempty"`
+ Translog *IndexStatsTranslog `json:"translog,omitempty"`
+ Suggest *IndexStatsSuggest `json:"suggest,omitempty"`
+ QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"`
+}
+
+type IndexStatsDocs struct {
+ Count int64 `json:"count,omitempty"`
+ Deleted int64 `json:"deleted,omitempty"`
+}
+
+type IndexStatsStore struct {
+ Size string `json:"size,omitempty"` // human size, e.g. 119.3mb
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+ ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 0s
+ ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"`
+}
+
+type IndexStatsIndexing struct {
+ IndexTotal int64 `json:"index_total,omitempty"`
+ IndexTime string `json:"index_time,omitempty"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"`
+ IndexCurrent int64 `json:"index_current,omitempty"`
+ DeleteTotal int64 `json:"delete_total,omitempty"`
+ DeleteTime string `json:"delete_time,omitempty"`
+ DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"`
+ DeleteCurrent int64 `json:"delete_current,omitempty"`
+ NoopUpdateTotal int64 `json:"noop_update_total,omitempty"`
+ IsThrottled bool `json:"is_throttled,omitempty"`
+ ThrottleTime string `json:"throttle_time,omitempty"`
+ ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"`
+}
+
+type IndexStatsGet struct {
+ Total int64 `json:"total,omitempty"`
+ GetTime string `json:"get_time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ ExistsTotal int64 `json:"exists_total,omitempty"`
+ ExistsTime string `json:"exists_time,omitempty"`
+ ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"`
+ MissingTotal int64 `json:"missing_total,omitempty"`
+ MissingTime string `json:"missing_time,omitempty"`
+ MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+}
+
+type IndexStatsSearch struct {
+ OpenContexts int64 `json:"open_contexts,omitempty"`
+ QueryTotal int64 `json:"query_total,omitempty"`
+ QueryTime string `json:"query_time,omitempty"`
+ QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"`
+ QueryCurrent int64 `json:"query_current,omitempty"`
+ FetchTotal int64 `json:"fetch_total,omitempty"`
+ FetchTime string `json:"fetch_time,omitempty"`
+ FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"`
+ FetchCurrent int64 `json:"fetch_current,omitempty"`
+}
+
+type IndexStatsMerges struct {
+ Current int64 `json:"current,omitempty"`
+ CurrentDocs int64 `json:"current_docs,omitempty"`
+ CurrentSize string `json:"current_size,omitempty"`
+ CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+ TotalDocs int64 `json:"total_docs,omitempty"`
+ TotalSize string `json:"total_size,omitempty"`
+ TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"`
+}
+
+type IndexStatsRefresh struct {
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+type IndexStatsFlush struct {
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+type IndexStatsWarmer struct {
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+}
+
+type IndexStatsFilterCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+}
+
+type IndexStatsIdCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+}
+
+type IndexStatsFielddata struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+}
+
+type IndexStatsPercolate struct {
+ Total int64 `json:"total,omitempty"`
+ GetTime string `json:"get_time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Queries int64 `json:"queries,omitempty"`
+}
+
+type IndexStatsCompletion struct {
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+}
+
+type IndexStatsSegments struct {
+ Count int64 `json:"count,omitempty"`
+ Memory string `json:"memory,omitempty"`
+ MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
+ IndexWriterMemory string `json:"index_writer_memory,omitempty"`
+ IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"`
+ IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"`
+ IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"`
+ VersionMapMemory string `json:"version_map_memory,omitempty"`
+ VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"`
+ FixedBitSetMemory string `json:"fixed_bit_set,omitempty"`
+ FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"`
+}
+
+type IndexStatsTranslog struct {
+ Operations int64 `json:"operations,omitempty"`
+ Size string `json:"size,omitempty"`
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+}
+
+type IndexStatsSuggest struct {
+ Total int64 `json:"total,omitempty"`
+ Time string `json:"time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+}
+
+type IndexStatsQueryCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+ HitCount int64 `json:"hit_count,omitempty"`
+ MissCount int64 `json:"miss_count,omitempty"`
+}
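+
+// exampleDocCount is an illustrative sketch, not part of the upstream
+// library: it restricts the stats request to the "docs" metric of the
+// assumed index "tweets" and returns the document count over all shards.
+func exampleDocCount(client *Client) (int64, error) {
+ res, err := NewIndicesStatsService(client).
+ Index("tweets").
+ Metric("docs").
+ Do()
+ if err != nil {
+ return 0, err
+ }
+ stats, found := res.Indices["tweets"]
+ if !found || stats.Total == nil || stats.Total.Docs == nil {
+ return 0, fmt.Errorf("no doc stats returned for index %q", "tweets")
+ }
+ return stats.Total.Docs.Count, nil
+}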
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit.go
new file mode 100644
index 0000000..0dcf693
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/inner_hit.go
@@ -0,0 +1,156 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// InnerHit implements a simple join for parent/child, nested, and even
+// top-level documents in Elasticsearch.
+// It is an experimental feature available in Elasticsearch versions 1.5 and greater.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html
+// for documentation.
+//
+// See the tests for SearchSource, HasChildFilter, HasChildQuery,
+// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery
+// for usage examples.
+type InnerHit struct {
+ source *SearchSource
+ path string
+ typ string
+
+ name string
+}
+
+// NewInnerHit creates a new InnerHit.
+func NewInnerHit() *InnerHit {
+ return &InnerHit{source: NewSearchSource()}
+}
+
+func (hit *InnerHit) Path(path string) *InnerHit {
+ hit.path = path
+ return hit
+}
+
+func (hit *InnerHit) Type(typ string) *InnerHit {
+ hit.typ = typ
+ return hit
+}
+
+func (hit *InnerHit) Query(query Query) *InnerHit {
+ hit.source.Query(query)
+ return hit
+}
+
+func (hit *InnerHit) From(from int) *InnerHit {
+ hit.source.From(from)
+ return hit
+}
+
+func (hit *InnerHit) Size(size int) *InnerHit {
+ hit.source.Size(size)
+ return hit
+}
+
+func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit {
+ hit.source.TrackScores(trackScores)
+ return hit
+}
+
+func (hit *InnerHit) Explain(explain bool) *InnerHit {
+ hit.source.Explain(explain)
+ return hit
+}
+
+func (hit *InnerHit) Version(version bool) *InnerHit {
+ hit.source.Version(version)
+ return hit
+}
+
+func (hit *InnerHit) Field(fieldName string) *InnerHit {
+ hit.source.Field(fieldName)
+ return hit
+}
+
+func (hit *InnerHit) Fields(fieldNames ...string) *InnerHit {
+ hit.source.Fields(fieldNames...)
+ return hit
+}
+
+func (hit *InnerHit) NoFields() *InnerHit {
+ hit.source.NoFields()
+ return hit
+}
+
+func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit {
+ hit.source.FetchSource(fetchSource)
+ return hit
+}
+
+func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit {
+ hit.source.FetchSourceContext(fetchSourceContext)
+ return hit
+}
+
+func (hit *InnerHit) FieldDataFields(fieldDataFields ...string) *InnerHit {
+ hit.source.FieldDataFields(fieldDataFields...)
+ return hit
+}
+
+func (hit *InnerHit) FieldDataField(fieldDataField string) *InnerHit {
+ hit.source.FieldDataField(fieldDataField)
+ return hit
+}
+
+func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit {
+ hit.source.ScriptFields(scriptFields...)
+ return hit
+}
+
+func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit {
+ hit.source.ScriptField(scriptField)
+ return hit
+}
+
+func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit {
+ hit.source.Sort(field, ascending)
+ return hit
+}
+
+func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit {
+ hit.source.SortWithInfo(info)
+ return hit
+}
+
+func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit {
+ hit.source.SortBy(sorter...)
+ return hit
+}
+
+func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit {
+ hit.source.Highlight(highlight)
+ return hit
+}
+
+func (hit *InnerHit) Highlighter() *Highlight {
+ return hit.source.Highlighter()
+}
+
+func (hit *InnerHit) Name(name string) *InnerHit {
+ hit.name = name
+ return hit
+}
+
+func (hit *InnerHit) Source() interface{} {
+ source, ok := hit.source.Source().(map[string]interface{})
+ if !ok {
+ return nil
+ }
+
+ // Notice that hit.typ and hit.path are not exported here.
+ // They are only used with SearchSource and serialized there.
+
+ if hit.name != "" {
+ source["name"] = hit.name
+ }
+ return source
+}
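
An InnerHit is normally attached to a nested or parent/child query, which serializes it as part of its own source; the builder can also be exercised on its own. A minimal sketch that prints the JSON the builder generates (the "comments" path and "created" field are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	// Return the three newest matching comments per top-level document.
	hit := elastic.NewInnerHit().
		Path("comments").
		Size(3).
		Sort("created", false). // descending, i.e. newest first
		Name("latest-comments")

	buf, err := json.Marshal(hit.Source())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(buf))
}
```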
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get.go
new file mode 100644
index 0000000..5ab946c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_get.go
@@ -0,0 +1,194 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+)
+
+type MultiGetService struct {
+ client *Client
+ preference string
+ realtime *bool
+ refresh *bool
+ items []*MultiGetItem
+}
+
+func NewMultiGetService(client *Client) *MultiGetService {
+ builder := &MultiGetService{
+ client: client,
+ items: make([]*MultiGetItem, 0),
+ }
+ return builder
+}
+
+func (b *MultiGetService) Preference(preference string) *MultiGetService {
+ b.preference = preference
+ return b
+}
+
+func (b *MultiGetService) Refresh(refresh bool) *MultiGetService {
+ b.refresh = &refresh
+ return b
+}
+
+func (b *MultiGetService) Realtime(realtime bool) *MultiGetService {
+ b.realtime = &realtime
+ return b
+}
+
+func (b *MultiGetService) Add(items ...*MultiGetItem) *MultiGetService {
+ b.items = append(b.items, items...)
+ return b
+}
+
+func (b *MultiGetService) Source() interface{} {
+ source := make(map[string]interface{})
+ items := make([]interface{}, len(b.items))
+ for i, item := range b.items {
+ items[i] = item.Source()
+ }
+ source["docs"] = items
+ return source
+}
+
+func (b *MultiGetService) Do() (*MultiGetResult, error) {
+ // Build url
+ path := "/_mget"
+
+ params := make(url.Values)
+ if b.realtime != nil {
+ params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+ }
+ if b.preference != "" {
+ params.Add("preference", b.preference)
+ }
+ if b.refresh != nil {
+ params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+ }
+
+ // Set body
+ body := b.Source()
+
+ // Get response
+ res, err := b.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(MultiGetResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Multi Get Item --
+
+// MultiGetItem is a single document to retrieve via the MultiGetService.
+type MultiGetItem struct {
+ index string
+ typ string
+ id string
+ routing string
+ fields []string
+ version *int64 // see org.elasticsearch.common.lucene.uid.Versions
+ versionType string // see org.elasticsearch.index.VersionType
+ fsc *FetchSourceContext
+}
+
+func NewMultiGetItem() *MultiGetItem {
+ return &MultiGetItem{}
+}
+
+func (item *MultiGetItem) Index(index string) *MultiGetItem {
+ item.index = index
+ return item
+}
+
+func (item *MultiGetItem) Type(typ string) *MultiGetItem {
+ item.typ = typ
+ return item
+}
+
+func (item *MultiGetItem) Id(id string) *MultiGetItem {
+ item.id = id
+ return item
+}
+
+func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
+ item.routing = routing
+ return item
+}
+
+func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
+ if item.fields == nil {
+ item.fields = make([]string, 0)
+ }
+ item.fields = append(item.fields, fields...)
+ return item
+}
+
+// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
+// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
+// The default in Elasticsearch is MatchAny (-3).
+func (item *MultiGetItem) Version(version int64) *MultiGetItem {
+ item.version = &version
+ return item
+}
+
+// VersionType can be "internal", "external", "external_gt", "external_gte",
+// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
+// It is "internal" by default.
+func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
+ item.versionType = versionType
+ return item
+}
+
+func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
+ item.fsc = fetchSourceContext
+ return item
+}
+
+// Source returns the serialized JSON to be sent to Elasticsearch as
+// part of a MultiGet search.
+func (item *MultiGetItem) Source() interface{} {
+ source := make(map[string]interface{})
+
+ source["_id"] = item.id
+
+ if item.index != "" {
+ source["_index"] = item.index
+ }
+ if item.typ != "" {
+ source["_type"] = item.typ
+ }
+ if item.fsc != nil {
+ source["_source"] = item.fsc.Source()
+ }
+ if item.fields != nil {
+ source["fields"] = item.fields
+ }
+ if item.routing != "" {
+ source["_routing"] = item.routing
+ }
+ if item.version != nil {
+ source["version"] = fmt.Sprintf("%d", *item.version)
+ }
+ if item.versionType != "" {
+ source["version_type"] = item.versionType
+ }
+
+ return source
+}
+
+// -- Result of a Multi Get request.
+
+type MultiGetResult struct {
+ Docs []*GetResult `json:"docs,omitempty"`
+}
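
A minimal usage sketch for the multi-get service. The index, type, and ids are illustrative, and it assumes an Elasticsearch 1.x cluster reachable at the client's default URL:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	res, err := elastic.NewMultiGetService(client).
		Add(
			elastic.NewMultiGetItem().Index("posts").Type("post").Id("1"),
			elastic.NewMultiGetItem().Index("posts").Type("post").Id("2").Fields("title"),
		).
		Realtime(true).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, doc := range res.Docs {
		fmt.Println(doc.Id, doc.Found)
	}
}
```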
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search.go
new file mode 100644
index 0000000..f42d5e5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/multi_search.go
@@ -0,0 +1,101 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// MultiSearchService executes one or more searches in one roundtrip.
+// See http://www.elasticsearch.org/guide/reference/api/multi-search/
+type MultiSearchService struct {
+ client *Client
+ requests []*SearchRequest
+ indices []string
+ pretty bool
+ routing string
+ preference string
+}
+
+func NewMultiSearchService(client *Client) *MultiSearchService {
+ builder := &MultiSearchService{
+ client: client,
+ requests: make([]*SearchRequest, 0),
+ indices: make([]string, 0),
+ }
+ return builder
+}
+
+func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
+ s.requests = append(s.requests, requests...)
+ return s
+}
+
+func (s *MultiSearchService) Index(index string) *MultiSearchService {
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *MultiSearchService) Indices(indices ...string) *MultiSearchService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
+ // Build url
+ path := "/_msearch"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Set body
+ lines := make([]string, 0)
+ for _, sr := range s.requests {
+ // Set default indices if not specified in the request
+ if !sr.HasIndices() && len(s.indices) > 0 {
+ sr = sr.Indices(s.indices...)
+ }
+
+ header, err := json.Marshal(sr.header())
+ if err != nil {
+ return nil, err
+ }
+ body, err := json.Marshal(sr.body())
+ if err != nil {
+ return nil, err
+ }
+ lines = append(lines, string(header))
+ lines = append(lines, string(body))
+ }
+ body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
+
+ // Get response
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(MultiSearchResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type MultiSearchResult struct {
+ Responses []*SearchResult `json:"responses,omitempty"`
+}
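
A sketch of combining two searches in a single round trip. It assumes the SearchRequest, SearchSource, and query builders defined elsewhere in this package; the index and field names are illustrative:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	req1 := elastic.NewSearchRequest().Indices("posts").
		Source(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()).Size(1))
	req2 := elastic.NewSearchRequest().Indices("users").
		Source(elastic.NewSearchSource().Query(elastic.NewTermQuery("name", "alice")))

	res, err := elastic.NewMultiSearchService(client).Add(req1, req2).Do()
	if err != nil {
		log.Fatal(err)
	}
	for i, r := range res.Responses {
		fmt.Printf("search %d: %d hits\n", i, r.TotalHits())
	}
}
```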
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info.go
new file mode 100644
index 0000000..e0f601e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/nodes_info.go
@@ -0,0 +1,311 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+ "time"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// NodesInfoService retrieves information about one, several, or all of
+// the cluster's nodes.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
+type NodesInfoService struct {
+ client *Client
+ pretty bool
+ nodeId []string
+ metric []string
+ flatSettings *bool
+ human *bool
+}
+
+// NewNodesInfoService creates a new NodesInfoService.
+func NewNodesInfoService(client *Client) *NodesInfoService {
+ return &NodesInfoService{
+ client: client,
+ nodeId: []string{"_all"},
+ metric: []string{"_all"},
+ }
+}
+
+// NodeId is a list of node IDs or names to limit the returned information.
+// Use "_local" to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
+ s.nodeId = make([]string, 0)
+ s.nodeId = append(s.nodeId, nodeId...)
+ return s
+}
+
+// Metric is a list of metrics you wish returned. Leave empty to return all.
+// Valid metrics are: settings, os, process, jvm, thread_pool, network,
+// transport, http, and plugins.
+func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
+ s.metric = make([]string, 0)
+ s.metric = append(s.metric, metric...)
+ return s
+}
+
+// FlatSettings returns settings in flat format (default: false).
+func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
+ s.flatSettings = &flatSettings
+ return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesInfoService) Human(human bool) *NodesInfoService {
+ s.human = &human
+ return s
+}
+
+// Pretty indicates whether to indent the returned JSON.
+func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
+ s.pretty = pretty
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+ "metric": strings.Join(s.metric, ","),
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+ }
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ }
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *NodesInfoService) Validate() error {
+ return nil
+}
+
+// Do executes the operation.
+func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(NodesInfoResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// NodesInfoResponse is the response of NodesInfoService.Do.
+type NodesInfoResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Nodes map[string]*NodesInfoNode `json:"nodes"`
+}
+
+type NodesInfoNode struct {
+ // Name of the node, e.g. "Mister Fear"
+ Name string `json:"name"`
+ // TransportAddress, e.g. "inet[/127.0.0.1:9300]"
+ TransportAddress string `json:"transport_address"`
+ // Host is the host name, e.g. "macbookair"
+ Host string `json:"host"`
+ // IP is the IP address, e.g. "192.168.1.2"
+ IP string `json:"ip"`
+ // Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+ Version string `json:"version"`
+ // Build is the Elasticsearch build, e.g. "36a29a7"
+ Build string `json:"build"`
+ // HTTPAddress, e.g. "inet[/127.0.0.1:9200]"
+ HTTPAddress string `json:"http_address"`
+ // HTTPSAddress, e.g. "inet[/127.0.0.1:9200]"
+ HTTPSAddress string `json:"https_address"`
+
+ // Settings of the node, e.g. paths and pidfile.
+ Settings map[string]interface{} `json:"settings"`
+
+ // OS information, e.g. CPU and memory.
+ OS *NodesInfoNodeOS `json:"os"`
+
+ // Process information, e.g. max file descriptors.
+ Process *NodesInfoNodeProcess `json:"process"`
+
+ // JVM information, e.g. VM version.
+ JVM *NodesInfoNodeJVM `json:"jvm"`
+
+ // ThreadPool information.
+ ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+
+ // Network information.
+ Network *NodesInfoNodeNetwork `json:"network"`
+
+ // Transport information.
+ Transport *NodesInfoNodeTransport `json:"transport"`
+
+ // HTTP information.
+ HTTP *NodesInfoNodeHTTP `json:"http"`
+
+ // Plugins information.
+ Plugins []*NodesInfoNodePlugin `json:"plugins"`
+}
+
+type NodesInfoNodeOS struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ AvailableProcessors int `json:"available_processors"` // e.g. 4
+
+ // CPU information
+ CPU struct {
+ Vendor string `json:"vendor"` // e.g. Intel
+ Model string `json:"model"` // e.g. iMac15,1
+ MHz int `json:"mhz"` // e.g. 3500
+ TotalCores int `json:"total_cores"` // e.g. 4
+ TotalSockets int `json:"total_sockets"` // e.g. 4
+ CoresPerSocket int `json:"cores_per_socket"` // e.g. 16
+ CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256
+ } `json:"cpu"`
+
+ // Mem information
+ Mem struct {
+ Total string `json:"total"` // e.g. 16gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184
+ } `json:"mem"`
+
+ // Swap information
+ Swap struct {
+ Total string `json:"total"` // e.g. 1gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824
+ } `json:"swap"`
+}
+
+type NodesInfoNodeProcess struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ ID int `json:"id"` // process id, e.g. 87079
+ MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768
+ Mlockall bool `json:"mlockall"` // e.g. false
+}
+
+type NodesInfoNodeJVM struct {
+ PID int `json:"pid"` // process id, e.g. 87079
+ Version string `json:"version"` // e.g. "1.8.0_25"
+ VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+ VMVersion string `json:"vm_version"` // e.g. "25.25-b02"
+ VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
+ StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z"
+ StartTimeInMillis int64 `json:"start_time_in_millis"`
+
+ // Mem information
+ Mem struct {
+ HeapInit string `json:"heap_init"` // e.g. 1gb
+ HeapInitInBytes int `json:"heap_init_in_bytes"`
+ HeapMax string `json:"heap_max"` // e.g. 4gb
+ HeapMaxInBytes int `json:"heap_max_in_bytes"`
+ NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb
+ NonHeapInitInBytes int `json:"non_heap_init_in_bytes"`
+ NonHeapMax string `json:"non_heap_max"` // e.g. 0b
+ NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"`
+ DirectMax string `json:"direct_max"` // e.g. 4gb
+ DirectMaxInBytes int `json:"direct_max_in_bytes"`
+ } `json:"mem"`
+
+ GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"]
+ MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"]
+}
+
+type NodesInfoNodeThreadPool struct {
+ Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"`
+ Bench *NodesInfoNodeThreadPoolSection `json:"bench"`
+ Listener *NodesInfoNodeThreadPoolSection `json:"listener"`
+ Index *NodesInfoNodeThreadPoolSection `json:"index"`
+ Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"`
+ Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"`
+ Generic *NodesInfoNodeThreadPoolSection `json:"generic"`
+ Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"`
+ Search *NodesInfoNodeThreadPoolSection `json:"search"`
+ Flush *NodesInfoNodeThreadPoolSection `json:"flush"`
+ Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"`
+ Management *NodesInfoNodeThreadPoolSection `json:"management"`
+ Get *NodesInfoNodeThreadPoolSection `json:"get"`
+ Merge *NodesInfoNodeThreadPoolSection `json:"merge"`
+ Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"`
+ Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"`
+}
+
+type NodesInfoNodeThreadPoolSection struct {
+ Type string `json:"type"` // e.g. fixed
+ Min int `json:"min"` // e.g. 4
+ Max int `json:"max"` // e.g. 4
+ KeepAlive string `json:"keep_alive"` // e.g. "5m"
+ QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1
+}
+
+type NodesInfoNodeNetwork struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ PrimaryInterface struct {
+ Address string `json:"address"` // e.g. 192.168.1.2
+ Name string `json:"name"` // e.g. en0
+ MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66
+ } `json:"primary_interface"`
+}
+
+type NodesInfoNodeTransport struct {
+ BoundAddress string `json:"bound_address"` // e.g. inet[/127.0.0.1:9300]
+ PublishAddress string `json:"publish_address"` // e.g. inet[/127.0.0.1:9300]
+}
+
+type NodesInfoNodeHTTP struct {
+ BoundAddress string `json:"bound_address"` // e.g. inet[/127.0.0.1:9300]
+ PublishAddress string `json:"publish_address"` // e.g. inet[/127.0.0.1:9300]
+ MaxContentLength string `json:"max_content_length"` // e.g. "100mb"
+ MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"`
+}
+
+type NodesInfoNodePlugin struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Site bool `json:"site"`
+ JVM bool `json:"jvm"`
+ URL string `json:"url"` // e.g. /_plugin/dummy/
+}
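
A minimal sketch of asking the local node for its OS and JVM information, assuming a cluster reachable at the client's default URL:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	res, err := elastic.NewNodesInfoService(client).
		NodeId("_local").
		Metric("os", "jvm").
		Human(true).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cluster:", res.ClusterName)
	for id, node := range res.Nodes {
		fmt.Printf("%s: %s v%s on %s\n", id, node.Name, node.Version, node.Host)
	}
}
```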
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize.go
new file mode 100644
index 0000000..16488d4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/optimize.go
@@ -0,0 +1,135 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type OptimizeService struct {
+ client *Client
+ indices []string
+ maxNumSegments *int
+ onlyExpungeDeletes *bool
+ flush *bool
+ waitForMerge *bool
+ force *bool
+ pretty bool
+}
+
+func NewOptimizeService(client *Client) *OptimizeService {
+ builder := &OptimizeService{
+ client: client,
+ indices: make([]string, 0),
+ }
+ return builder
+}
+
+func (s *OptimizeService) Index(index string) *OptimizeService {
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *OptimizeService) Indices(indices ...string) *OptimizeService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService {
+ s.maxNumSegments = &maxNumSegments
+ return s
+}
+
+func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService {
+ s.onlyExpungeDeletes = &onlyExpungeDeletes
+ return s
+}
+
+func (s *OptimizeService) Flush(flush bool) *OptimizeService {
+ s.flush = &flush
+ return s
+}
+
+func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService {
+ s.waitForMerge = &waitForMerge
+ return s
+}
+
+func (s *OptimizeService) Force(force bool) *OptimizeService {
+ s.force = &force
+ return s
+}
+
+func (s *OptimizeService) Pretty(pretty bool) *OptimizeService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *OptimizeService) Do() (*OptimizeResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ path += "/_optimize"
+
+ // Parameters
+ params := make(url.Values)
+ if s.maxNumSegments != nil {
+ params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments))
+ }
+ if s.onlyExpungeDeletes != nil {
+ params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
+ }
+ if s.flush != nil {
+ params.Set("flush", fmt.Sprintf("%v", *s.flush))
+ }
+ if s.waitForMerge != nil {
+ params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge))
+ }
+ if s.force != nil {
+ params.Set("force", fmt.Sprintf("%v", *s.force))
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(OptimizeResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of an optimize request.
+
+type OptimizeResult struct {
+ Shards shardsInfo `json:"_shards,omitempty"`
+}
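
A sketch of merging an index down to a single segment with this service; the index name is illustrative and the cluster is assumed reachable at the default URL:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	res, err := elastic.NewOptimizeService(client).
		Index("posts").
		MaxNumSegments(1).
		WaitForMerge(true).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("shards: %+v\n", res.Shards)
}
```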
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate.go
new file mode 100644
index 0000000..69d04df
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/percolate.go
@@ -0,0 +1,310 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html.
+type PercolateService struct {
+ client *Client
+ pretty bool
+ index string
+ typ string
+ id string
+ version interface{}
+ versionType string
+ routing []string
+ preference string
+ ignoreUnavailable *bool
+ percolateIndex string
+ percolatePreference string
+ percolateRouting string
+ source string
+ allowNoIndices *bool
+ expandWildcards string
+ percolateFormat string
+ percolateType string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewPercolateService creates a new PercolateService.
+func NewPercolateService(client *Client) *PercolateService {
+ return &PercolateService{
+ client: client,
+ routing: make([]string, 0),
+ }
+}
+
+// Index is the name of the index of the document being percolated.
+func (s *PercolateService) Index(index string) *PercolateService {
+ s.index = index
+ return s
+}
+
+// Type is the type of the document being percolated.
+func (s *PercolateService) Type(typ string) *PercolateService {
+ s.typ = typ
+ return s
+}
+
+// Id substitutes the document in the request body with a document
+// that is known by the specified id. In addition to the id, the index
+// and type parameters are used to retrieve the document from within
+// the cluster.
+func (s *PercolateService) Id(id string) *PercolateService {
+ s.id = id
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions
+// to concrete indices that are open, closed or both.
+func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// PercolateFormat indicates whether to return an array of matching
+// query IDs instead of objects.
+func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService {
+ s.percolateFormat = percolateFormat
+ return s
+}
+
+// PercolateType is the type to percolate the document into. Defaults to the document's type.
+func (s *PercolateService) PercolateType(percolateType string) *PercolateService {
+ s.percolateType = percolateType
+ return s
+}
+
+// PercolateRouting is the routing value to use when percolating
+// the existing document.
+func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService {
+ s.percolateRouting = percolateRouting
+ return s
+}
+
+// Source is the URL-encoded request definition.
+func (s *PercolateService) Source(source string) *PercolateService {
+ s.source = source
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// PercolateIndex is the index to percolate the document into. Defaults to index.
+func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService {
+ s.percolateIndex = percolateIndex
+ return s
+}
+
+// PercolatePreference defines which shard to prefer when executing
+// the percolate request.
+func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService {
+ s.percolatePreference = percolatePreference
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *PercolateService) Version(version interface{}) *PercolateService {
+ s.version = version
+ return s
+}
+
+// VersionType is the specific version type.
+func (s *PercolateService) VersionType(versionType string) *PercolateService {
+ s.versionType = versionType
+ return s
+}
+
+// Routing is a list of specific routing values.
+func (s *PercolateService) Routing(routing []string) *PercolateService {
+ s.routing = routing
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: random).
+func (s *PercolateService) Preference(preference string) *PercolateService {
+ s.preference = preference
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *PercolateService) Pretty(pretty bool) *PercolateService {
+ s.pretty = pretty
+ return s
+}
+
+// Doc wraps the given document into the "doc" key of the body.
+func (s *PercolateService) Doc(doc interface{}) *PercolateService {
+ return s.BodyJson(map[string]interface{}{"doc": doc})
+}
+
+// BodyJson is the percolator request definition using the percolate DSL.
+func (s *PercolateService) BodyJson(body interface{}) *PercolateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the percolator request definition using the percolate DSL.
+func (s *PercolateService) BodyString(body string) *PercolateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *PercolateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ var path string
+ var err error
+ if s.id == "" {
+ path, err = uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ })
+ } else {
+ path, err = uritemplates.Expand("/{index}/{type}/{id}/_percolate", map[string]string{
+ "index": s.index,
+ "type": s.typ,
+ "id": s.id,
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%v", s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ if len(s.routing) > 0 {
+ params.Set("routing", strings.Join(s.routing, ","))
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.percolateIndex != "" {
+ params.Set("percolate_index", s.percolateIndex)
+ }
+ if s.percolatePreference != "" {
+ params.Set("percolate_preference", s.percolatePreference)
+ }
+ if s.percolateRouting != "" {
+ params.Set("percolate_routing", s.percolateRouting)
+ }
+ if s.source != "" {
+ params.Set("source", s.source)
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.percolateFormat != "" {
+ params.Set("percolate_format", s.percolateFormat)
+ }
+ if s.percolateType != "" {
+ params.Set("percolate_type", s.percolateType)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PercolateService) Validate() error {
+ var invalid []string
+ if s.index == "" {
+ invalid = append(invalid, "Index")
+ }
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *PercolateService) Do() (*PercolateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("GET", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(PercolateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// PercolateResponse is the response of PercolateService.Do.
+type PercolateResponse struct {
+ TookInMillis int64 `json:"took"` // search time in milliseconds
+ Total int64 `json:"total"` // total matches
+ Matches []*PercolateMatch `json:"matches,omitempty"`
+ Facets SearchFacets `json:"facets,omitempty"` // results from facets
+ Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations
+}
+
+// PercolateMatch returns a single match in a PercolateResponse.
+type PercolateMatch struct {
+ Index string `json:"_index,omitempty"`
+ Id string `json:"_id"`
+ Score float64 `json:"_score,omitempty"`
+}
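
A sketch of percolating an ad-hoc document against the queries registered on an index. The index, type, and document are illustrative:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Doc wraps the document into the "doc" key of the request body.
	res, err := elastic.NewPercolateService(client).
		Index("posts").
		Type("post").
		Doc(map[string]interface{}{"title": "Percolation in Elasticsearch"}).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d matching queries\n", res.Total)
	for _, m := range res.Matches {
		fmt.Println(m.Index, m.Id)
	}
}
```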
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping.go
new file mode 100644
index 0000000..84a2438
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/ping.go
@@ -0,0 +1,117 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+)
+
+// PingService checks if an Elasticsearch server on a given URL is alive.
+// When asked for, it can also return various information about the
+// Elasticsearch server, e.g. the Elasticsearch version number.
+//
+// Ping simply issues an HTTP GET request to the URL of the server.
+// If the server responds with HTTP status code 200 OK, the server is alive.
+type PingService struct {
+ client *Client
+ url string
+ timeout string
+ httpHeadOnly bool
+ pretty bool
+}
+
+// PingResult is the result returned from querying the Elasticsearch server.
+type PingResult struct {
+ Status int `json:"status"`
+ Name string `json:"name"`
+ ClusterName string `json:"cluster_name"`
+ Version struct {
+ Number string `json:"number"`
+ BuildHash string `json:"build_hash"`
+ BuildTimestamp string `json:"build_timestamp"`
+ BuildSnapshot bool `json:"build_snapshot"`
+ LuceneVersion string `json:"lucene_version"`
+ } `json:"version"`
+ TagLine string `json:"tagline"`
+}
+
+func NewPingService(client *Client) *PingService {
+ return &PingService{
+ client: client,
+ url: DefaultURL,
+ httpHeadOnly: false,
+ pretty: false,
+ }
+}
+
+func (s *PingService) URL(url string) *PingService {
+ s.url = url
+ return s
+}
+
+func (s *PingService) Timeout(timeout string) *PingService {
+ s.timeout = timeout
+ return s
+}
+
+// HttpHeadOnly makes the service only return the status code in Do;
+// the PingResult will be nil.
+func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
+ s.httpHeadOnly = httpHeadOnly
+ return s
+}
+
+func (s *PingService) Pretty(pretty bool) *PingService {
+ s.pretty = pretty
+ return s
+}
+
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do() (*PingResult, int, error) {
+ url_ := s.url + "/"
+
+ params := make(url.Values)
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if len(params) > 0 {
+ url_ += "?" + params.Encode()
+ }
+
+ var method string
+ if s.httpHeadOnly {
+ method = "HEAD"
+ } else {
+ method = "GET"
+ }
+
+ // Notice: This service must NOT use PerformRequest!
+ req, err := NewRequest(method, url_)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ res, err := s.client.c.Do((*http.Request)(req))
+ if err != nil {
+ return nil, 0, err
+ }
+ defer res.Body.Close()
+
+ var ret *PingResult
+ if !s.httpHeadOnly {
+ ret = new(PingResult)
+ if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+ return nil, res.StatusCode, err
+ }
+ }
+
+ return ret, res.StatusCode, nil
+}
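
A minimal sketch of pinging the server at the default URL and reading version information from the result:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	res, code, err := elastic.NewPingService(client).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("HTTP status:", code)
	if res != nil {
		fmt.Println("version:", res.Version.Number, "tagline:", res.TagLine)
	}
}
```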
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping.go
new file mode 100644
index 0000000..0491e50
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_mapping.go
@@ -0,0 +1,222 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+)
+
+// PutMappingService registers a specific mapping definition for a
+// specific type.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-put-mapping.html.
+type PutMappingService struct {
+ client *Client
+ pretty bool
+ typ string
+ index []string
+ masterTimeout string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ ignoreConflicts *bool
+ timeout string
+ bodyJson map[string]interface{}
+ bodyString string
+}
+
+// NewPutMappingService creates a new PutMappingService.
+func NewPutMappingService(client *Client) *PutMappingService {
+ return &PutMappingService{
+ client: client,
+ index: make([]string, 0),
+ }
+}
+
+// Index is a list of index names the mapping should be added to
+// (supports wildcards); use `_all` or omit to add the mapping on all indices.
+func (s *PutMappingService) Index(index ...string) *PutMappingService {
+ s.index = append(s.index, index...)
+ return s
+}
+
+// Type is the name of the document type.
+func (s *PutMappingService) Type(typ string) *PutMappingService {
+ s.typ = typ
+ return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *PutMappingService) Timeout(timeout string) *PutMappingService {
+ s.timeout = timeout
+ return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *PutMappingService) MasterTimeout(masterTimeout string) *PutMappingService {
+ s.masterTimeout = masterTimeout
+ return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *PutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *PutMappingService {
+ s.ignoreUnavailable = &ignoreUnavailable
+ return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *PutMappingService) AllowNoIndices(allowNoIndices bool) *PutMappingService {
+ s.allowNoIndices = &allowNoIndices
+ return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *PutMappingService) ExpandWildcards(expandWildcards string) *PutMappingService {
+ s.expandWildcards = expandWildcards
+ return s
+}
+
+// IgnoreConflicts specifies whether to ignore conflicts while updating
+// the mapping (default: false).
+func (s *PutMappingService) IgnoreConflicts(ignoreConflicts bool) *PutMappingService {
+ s.ignoreConflicts = &ignoreConflicts
+ return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *PutMappingService) Pretty(pretty bool) *PutMappingService {
+ s.pretty = pretty
+ return s
+}
+
+// BodyJson contains the mapping definition.
+func (s *PutMappingService) BodyJson(mapping map[string]interface{}) *PutMappingService {
+ s.bodyJson = mapping
+ return s
+}
+
+// BodyString is the mapping definition serialized as a string.
+func (s *PutMappingService) BodyString(mapping string) *PutMappingService {
+ s.bodyString = mapping
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *PutMappingService) buildURL() (string, url.Values, error) {
+ var err error
+ var path string
+
+ // Build URL: Typ MUST be specified and is verified in Validate.
+ if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": s.typ,
+ })
+ } else {
+ path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{
+ "type": s.typ,
+ })
+ }
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.pretty {
+ params.Set("pretty", "1")
+ }
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ }
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ }
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ }
+ if s.ignoreConflicts != nil {
+ params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts))
+ }
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ }
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ }
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PutMappingService) Validate() error {
+ var invalid []string
+ if s.typ == "" {
+ invalid = append(invalid, "Type")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *PutMappingService) Do() (*PutMappingResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("PUT", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(PutMappingResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// PutMappingResponse is the response of PutMappingService.Do.
+type PutMappingResponse struct {
+ Acknowledged bool `json:"acknowledged"`
+}
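
A sketch of registering a mapping via BodyJson. The index, type, and field definitions are illustrative; "string" is the text type of the Elasticsearch 1.x era this client targets:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	mapping := map[string]interface{}{
		"post": map[string]interface{}{
			"properties": map[string]interface{}{
				"title":   map[string]interface{}{"type": "string"},
				"created": map[string]interface{}{"type": "date"},
			},
		},
	}

	res, err := elastic.NewPutMappingService(client).
		Index("posts").
		Type("post").
		BodyJson(mapping).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
```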
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_template.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_template.go
new file mode 100644
index 0000000..4a4a84b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/put_template.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// PutTemplateService creates or updates a search template.
+// The documentation can be found at
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type PutTemplateService struct {
+ client *Client
+ pretty bool
+ id string
+ opType string
+ version *int
+ versionType string
+ bodyJson interface{}
+ bodyString string
+}
+
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+ return &PutTemplateService{
+ client: client,
+ }
+}
+
+// Id is the template ID.
+func (s *PutTemplateService) Id(id string) *PutTemplateService {
+ s.id = id
+ return s
+}
+
+// OpType is an explicit operation type.
+func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
+ s.opType = opType
+ return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *PutTemplateService) Version(version int) *PutTemplateService {
+ s.version = &version
+ return s
+}
+
+// VersionType is a specific version type.
+func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
+ s.versionType = versionType
+ return s
+}
+
+// BodyJson is the document as a JSON serializable object.
+func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
+ s.bodyJson = body
+ return s
+}
+
+// BodyString is the document as a string.
+func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
+ s.bodyString = body
+ return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *PutTemplateService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+ "id": s.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Add query string parameters
+ params := url.Values{}
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *s.version))
+ }
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+ }
+ if s.opType != "" {
+ params.Set("op_type", s.opType)
+ }
+
+ return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PutTemplateService) Validate() error {
+ var invalid []string
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+ }
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+ }
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+ }
+ return nil
+}
+
+// Do executes the operation.
+func (s *PutTemplateService) Do() (*PutTemplateResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Get URL for request
+ path, params, err := s.buildURL()
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup HTTP request body
+ var body interface{}
+ if s.bodyJson != nil {
+ body = s.bodyJson
+ } else {
+ body = s.bodyString
+ }
+
+ // Get HTTP response
+ res, err := s.client.PerformRequest("PUT", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return operation response
+ ret := new(PutTemplateResponse)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// PutTemplateResponse is the response of PutTemplateService.Do.
+type PutTemplateResponse struct {
+ Id string `json:"_id"`
+ Version int `json:"_version"`
+ Created bool `json:"created"`
+}
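
A sketch of storing a mustache search template under an id; the template body and parameter name are illustrative:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	tmpl := `{"template": {"query": {"match": {"title": "{{query_string}}"}}}}`

	res, err := elastic.NewPutTemplateService(client).
		Id("title-search").
		BodyString(tmpl).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", res.Created, "version:", res.Version)
}
```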
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/query.go
new file mode 100644
index 0000000..0c9e670
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/query.go
@@ -0,0 +1,14 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query represents the generic query interface. A query's only
+// purpose is to return the source of the query as a JSON-serializable
+// object. Returning a map[string]interface{} will do.
+type Query interface {
+ Source() interface{}
+}
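
Because Query is satisfied by anything with a Source() method, callers can plug their own query types into the search services. A hypothetical custom query:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

// TitlePrefixQuery is a hypothetical custom query type.
type TitlePrefixQuery struct {
	prefix string
}

// Source returns the query as a JSON-serializable map, which is all
// the Query interface asks for.
func (q TitlePrefixQuery) Source() interface{} {
	return map[string]interface{}{
		"prefix": map[string]interface{}{
			"title": q.prefix,
		},
	}
}

// Compile-time check that TitlePrefixQuery satisfies elastic.Query.
var _ elastic.Query = TitlePrefixQuery{}

func main() {
	buf, err := json.Marshal(TitlePrefixQuery{prefix: "elast"}.Source())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(buf)) // {"prefix":{"title":"elast"}}
}
```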
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh.go
new file mode 100644
index 0000000..1f0ded8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/refresh.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+type RefreshService struct {
+ client *Client
+ indices []string
+ force *bool
+ pretty bool
+}
+
+func NewRefreshService(client *Client) *RefreshService {
+ builder := &RefreshService{
+ client: client,
+ indices: make([]string, 0),
+ }
+ return builder
+}
+
+func (s *RefreshService) Index(index string) *RefreshService {
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *RefreshService) Indices(indices ...string) *RefreshService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *RefreshService) Force(force bool) *RefreshService {
+ s.force = &force
+ return s
+}
+
+func (s *RefreshService) Pretty(pretty bool) *RefreshService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *RefreshService) Do() (*RefreshResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ path += "/_refresh"
+
+ // Parameters
+ params := make(url.Values)
+ if s.force != nil {
+ params.Set("force", fmt.Sprintf("%v", *s.force))
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(RefreshResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// -- Result of a refresh request.
+
+type RefreshResult struct {
+ Shards shardsInfo `json:"_shards,omitempty"`
+}
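
A sketch of forcing a refresh so that recently indexed documents become searchable immediately; the index name is illustrative:

```go
package main

import (
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	res, err := elastic.NewRefreshService(client).Index("posts").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("shards: %+v\n", res.Shards)
}
```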
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer.go
new file mode 100644
index 0000000..5810f19
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/reindexer.go
@@ -0,0 +1,270 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+// Reindexer simplifies the process of reindexing an index. You typically
+// reindex a source index to a target index. However, you can also specify
+// a query that filters out documents from the source index before bulk
+// indexing them into the target index. The caller may also specify a
+// different client for the target, e.g. when copying indices from one
+// Elasticsearch cluster to another.
+//
+// Internally, the Reindexer uses a scan and scroll operation on the source
+// index and bulk indexing to push data into the target index.
+//
+// By default, the reindexer fetches the _source, _parent, and _routing
+// attributes from the source index; the provided CopyToTargetIndex func
+// copies those attributes into the target index.
+// This behaviour can be overridden by setting ScanFields and providing a
+// custom ReindexerFunc.
+//
+// The caller is responsible for setting up and/or clearing the target index
+// before starting the reindex process.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+type Reindexer struct {
+ sourceClient, targetClient *Client
+ sourceIndex string
+ query Query
+ scanFields []string
+ bulkSize int
+ size int
+ scroll string
+ reindexerFunc ReindexerFunc
+ progress ReindexerProgressFunc
+ statsOnly bool
+}
+
+// A ReindexerFunc receives each hit from the sourceIndex.
+// It can choose to add any number of BulkableRequests to the bulkService.
+type ReindexerFunc func(hit *SearchHit, bulkService *BulkService) error
+
+// CopyToTargetIndex returns a ReindexerFunc that copies the SearchHit's
+// _source, _parent, and _routing attributes into the targetIndex.
+func CopyToTargetIndex(targetIndex string) ReindexerFunc {
+ return func(hit *SearchHit, bulkService *BulkService) error {
+ // TODO(oe) Do we need to deserialize here?
+ source := make(map[string]interface{})
+ if err := json.Unmarshal(*hit.Source, &source); err != nil {
+ return err
+ }
+ req := NewBulkIndexRequest().Index(targetIndex).Type(hit.Type).Id(hit.Id).Doc(source)
+ if parent, ok := hit.Fields["_parent"].(string); ok {
+ req.Parent(parent)
+ }
+ if routing, ok := hit.Fields["_routing"].(string); ok {
+ req.Routing(routing)
+ }
+ bulkService.Add(req)
+ return nil
+ }
+}
+
+// ReindexerProgressFunc is a callback that can be used with Reindexer
+// to report progress while reindexing data.
+type ReindexerProgressFunc func(current, total int64)
+
+// ReindexerResponse is returned from the Do func in a Reindexer.
+// By default, it returns the number of succeeded and failed bulk operations.
+// To return details about all failed items, set StatsOnly to false in
+// Reindexer.
+type ReindexerResponse struct {
+ Success int64
+ Failed int64
+ Errors []*BulkResponseItem
+}
+
+// NewReindexer returns a new Reindexer.
+func NewReindexer(client *Client, source string, reindexerFunc ReindexerFunc) *Reindexer {
+ return &Reindexer{
+ sourceClient: client,
+ sourceIndex: source,
+ reindexerFunc: reindexerFunc,
+ statsOnly: true,
+ }
+}
+
+// TargetClient specifies a different client for the target. This is
+// necessary when the target index is in a different Elasticsearch cluster.
+// By default, the source and target clients are the same.
+func (ix *Reindexer) TargetClient(c *Client) *Reindexer {
+ ix.targetClient = c
+ return ix
+}
+
+// Query specifies the query to apply to the source. It filters out those
+// documents to be indexed into target. A nil query does not filter out any
+// documents.
+func (ix *Reindexer) Query(q Query) *Reindexer {
+ ix.query = q
+ return ix
+}
+
+// ScanFields specifies the fields the scan query should load.
+// The default fields are _source, _parent, _routing.
+func (ix *Reindexer) ScanFields(scanFields ...string) *Reindexer {
+ ix.scanFields = scanFields
+ return ix
+}
+
+// BulkSize sets the number of documents to send to Elasticsearch per chunk.
+// The default is 500.
+func (ix *Reindexer) BulkSize(bulkSize int) *Reindexer {
+ ix.bulkSize = bulkSize
+ return ix
+}
+
+// Size is the number of results to return per shard, not per request.
+// So a size of 10 which hits 5 shards will return a maximum of 50 results
+// per scan request.
+func (ix *Reindexer) Size(size int) *Reindexer {
+ ix.size = size
+ return ix
+}
+
+// Scroll specifies for how long the scroll operation on the source index
+// should be maintained. The default is 5m.
+func (ix *Reindexer) Scroll(timeout string) *Reindexer {
+ ix.scroll = timeout
+ return ix
+}
+
+// Progress indicates a callback that will be called while indexing.
+func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer {
+ ix.progress = f
+ return ix
+}
+
+// StatsOnly indicates whether the Do method should return details e.g. about
+// the documents that failed while indexing. It is true by default, i.e. only
+// the number of documents that succeeded/failed are returned. Set to false
+// if you want all the details.
+func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer {
+ ix.statsOnly = statsOnly
+ return ix
+}
+
+// Do starts the reindexing process.
+func (ix *Reindexer) Do() (*ReindexerResponse, error) {
+ if ix.sourceClient == nil {
+ return nil, errors.New("no source client")
+ }
+ if ix.sourceIndex == "" {
+ return nil, errors.New("no source index")
+ }
+ if ix.targetClient == nil {
+ ix.targetClient = ix.sourceClient
+ }
+ if ix.scanFields == nil {
+ ix.scanFields = []string{"_source", "_parent", "_routing"}
+ }
+ if ix.bulkSize <= 0 {
+ ix.bulkSize = 500
+ }
+ if ix.scroll == "" {
+ ix.scroll = "5m"
+ }
+
+ // Count total to report progress (if necessary)
+ var err error
+ var current, total int64
+ if ix.progress != nil {
+ total, err = ix.count()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Prepare scan and scroll to iterate through the source index
+ scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll).Fields(ix.scanFields...)
+ if ix.query != nil {
+ scanner = scanner.Query(ix.query)
+ }
+ if ix.size > 0 {
+ scanner = scanner.Size(ix.size)
+ }
+ cursor, err := scanner.Do()
+ if err != nil {
+ return nil, err
+ }
+
+ bulk := ix.targetClient.Bulk()
+
+ ret := &ReindexerResponse{
+ Errors: make([]*BulkResponseItem, 0),
+ }
+
+ // Main loop iterates through the source index and bulk indexes into target.
+ for {
+ docs, err := cursor.Next()
+ if err == EOS {
+ break
+ }
+ if err != nil {
+ return ret, err
+ }
+
+ if docs.TotalHits() > 0 {
+ for _, hit := range docs.Hits.Hits {
+ if ix.progress != nil {
+ current++
+ ix.progress(current, total)
+ }
+
+ err := ix.reindexerFunc(hit, bulk)
+ if err != nil {
+ return ret, err
+ }
+
+ if bulk.NumberOfActions() >= ix.bulkSize {
+ bulk, err = ix.commit(bulk, ret)
+ if err != nil {
+ return ret, err
+ }
+ }
+ }
+ }
+ }
+
+ // Final flush
+ if bulk.NumberOfActions() > 0 {
+ if _, err := ix.commit(bulk, ret); err != nil {
+ return ret, err
+ }
+ }
+
+ return ret, nil
+}
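+
+// The following is an illustrative sketch of how the pieces above fit
+// together. The index names and the progress function are assumptions for
+// the example, and reindexFn stands for a ReindexerFunc such as the
+// closure shown at the top of this file:
+//
+//   ix := NewReindexer(client, "source-index", reindexFn).
+//       BulkSize(1000).
+//       Progress(func(current, total int64) {
+//           log.Printf("reindexed %d of %d", current, total)
+//       })
+//   res, err := ix.Do()
+//   if err != nil {
+//       // Handle error
+//   }
+//   log.Printf("success: %d, failed: %d", res.Success, res.Failed)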
+
+// count returns the number of documents in the source index.
+// The query is taken into account, if specified.
+func (ix *Reindexer) count() (int64, error) {
+ service := ix.sourceClient.Count(ix.sourceIndex)
+ if ix.query != nil {
+ service = service.Query(ix.query)
+ }
+ return service.Do()
+}
+
+// commit commits a bulk, updates the stats, and returns a fresh bulk service.
+func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) {
+ bres, err := bulk.Do()
+ if err != nil {
+ return nil, err
+ }
+ ret.Success += int64(len(bres.Succeeded()))
+ failed := bres.Failed()
+ ret.Failed += int64(len(failed))
+ if !ix.statsOnly {
+ ret.Errors = append(ret.Errors, failed...)
+ }
+ bulk = ix.targetClient.Bulk()
+ return bulk, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/request.go
new file mode 100644
index 0000000..eb5a3b1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/request.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "runtime"
+ "strings"
+)
+
+// Request is an Elasticsearch-specific HTTP request.
+type Request http.Request
+
+// NewRequest creates a new request with sensible default headers
+// (User-Agent and Accept).
+func NewRequest(method, url string) (*Request, error) {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
+ req.Header.Add("Accept", "application/json")
+ return (*Request)(req), nil
+}
+
+// SetBodyJson marshals the given data to JSON and uses it as the
+// request body, setting the Content-Type header accordingly.
+func (r *Request) SetBodyJson(data interface{}) error {
+ body, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ r.SetBody(bytes.NewReader(body))
+ r.Header.Set("Content-Type", "application/json")
+ return nil
+}
+
+// SetBodyString sets the request body to the given string.
+func (r *Request) SetBodyString(body string) error {
+ return r.SetBody(strings.NewReader(body))
+}
+
+// SetBody sets the request body, wrapping it in a ReadCloser if
+// necessary and recording the content length when it can be determined.
+func (r *Request) SetBody(body io.Reader) error {
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = ioutil.NopCloser(body)
+ }
+ r.Body = rc
+ if body != nil {
+ switch v := body.(type) {
+ case *strings.Reader:
+ r.ContentLength = int64(v.Len())
+ case *bytes.Buffer:
+ r.ContentLength = int64(v.Len())
+ }
+ }
+ return nil
+}
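+
+// A minimal sketch of how these helpers combine (the URL and body here
+// are hypothetical values for illustration):
+//
+//   req, err := NewRequest("POST", "http://127.0.0.1:9200/twitter/_search")
+//   if err != nil {
+//       // Handle error
+//   }
+//   err = req.SetBodyJson(map[string]interface{}{
+//       "query": map[string]interface{}{"match_all": map[string]interface{}{}},
+//   })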
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescore.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescore.go
new file mode 100644
index 0000000..bd57ab7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescore.go
@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Rescore wraps a Rescorer and an optional window size used when
+// rescoring the top search hits.
+type Rescore struct {
+ rescorer Rescorer
+ windowSize *int
+ defaultRescoreWindowSize *int
+}
+
+func NewRescore() *Rescore {
+ return &Rescore{}
+}
+
+func (r *Rescore) WindowSize(windowSize int) *Rescore {
+ r.windowSize = &windowSize
+ return r
+}
+
+func (r *Rescore) IsEmpty() bool {
+ return r.rescorer == nil
+}
+
+func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore {
+ r.rescorer = rescorer
+ return r
+}
+
+func (r *Rescore) Source() interface{} {
+ source := make(map[string]interface{})
+ if r.windowSize != nil {
+ source["window_size"] = *r.windowSize
+ } else if r.defaultRescoreWindowSize != nil {
+ source["window_size"] = *r.defaultRescoreWindowSize
+ }
+ source[r.rescorer.Name()] = r.rescorer.Source()
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescorer.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescorer.go
new file mode 100644
index 0000000..cbb8218
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/rescorer.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Rescorer is the interface implemented by all rescorers,
+// e.g. QueryRescorer.
+type Rescorer interface {
+ Name() string
+ Source() interface{}
+}
+
+// -- Query Rescorer --
+
+// QueryRescorer rescores the top search hits by running a second query
+// on them.
+type QueryRescorer struct {
+ query Query
+ rescoreQueryWeight *float64
+ queryWeight *float64
+ scoreMode string
+}
+
+func NewQueryRescorer(query Query) *QueryRescorer {
+ return &QueryRescorer{
+ query: query,
+ }
+}
+
+func (r *QueryRescorer) Name() string {
+ return "query"
+}
+
+func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer {
+ r.rescoreQueryWeight = &rescoreQueryWeight
+ return r
+}
+
+func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer {
+ r.queryWeight = &queryWeight
+ return r
+}
+
+func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer {
+ r.scoreMode = scoreMode
+ return r
+}
+
+func (r *QueryRescorer) Source() interface{} {
+ source := make(map[string]interface{})
+ source["rescore_query"] = r.query.Source()
+ if r.queryWeight != nil {
+ source["query_weight"] = *r.queryWeight
+ }
+ if r.rescoreQueryWeight != nil {
+ source["rescore_query_weight"] = *r.rescoreQueryWeight
+ }
+ if r.scoreMode != "" {
+ source["score_mode"] = r.scoreMode
+ }
+ return source
+}
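+
+// A usage sketch: rescore the top 50 hits with a second query. The
+// concrete query, weights, and window size are assumptions for the
+// example, and NewTermQuery is the term query constructor defined
+// elsewhere in this package:
+//
+//   rescorer := NewQueryRescorer(NewTermQuery("user", "olivere")).
+//       QueryWeight(0.7).
+//       RescoreQueryWeight(1.2)
+//   rescore := NewRescore().WindowSize(50).Rescorer(rescorer)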
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/response.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/response.go
new file mode 100644
index 0000000..9426c23
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/response.go
@@ -0,0 +1,43 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+)
+
+// Response represents a response from Elasticsearch.
+type Response struct {
+ // StatusCode is the HTTP status code, e.g. 200.
+ StatusCode int
+ // Header is the HTTP header from the HTTP response.
+ // Keys in the map are canonicalized (see http.CanonicalHeaderKey).
+ Header http.Header
+ // Body is the deserialized response body.
+ Body json.RawMessage
+}
+
+// newResponse creates a new response from the HTTP response.
+func (c *Client) newResponse(res *http.Response) (*Response, error) {
+ r := &Response{
+ StatusCode: res.StatusCode,
+ Header: res.Header,
+ }
+ if res.Body != nil {
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ // HEAD requests return a non-nil Body with no content
+ if len(slurp) > 0 {
+ if err := c.decoder.Decode(slurp, &r.Body); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return r, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan.go
new file mode 100644
index 0000000..6093f3a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scan.go
@@ -0,0 +1,373 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+const (
+ defaultKeepAlive = "5m"
+)
+
+var (
+ // EOS marks the end of a stream (or scan).
+ EOS = errors.New("EOS")
+
+ // ErrNoScrollId is returned when an operation requires a scroll id
+ // but none is set.
+ ErrNoScrollId = errors.New("no scrollId")
+)
+
+// ScanService manages a cursor through documents in Elasticsearch.
+type ScanService struct {
+ client *Client
+ indices []string
+ types []string
+ keepAlive string
+ searchSource *SearchSource
+ pretty bool
+ routing string
+ preference string
+ size *int
+}
+
+// NewScanService creates a new service to iterate through the results
+// of a query.
+func NewScanService(client *Client) *ScanService {
+ builder := &ScanService{
+ client: client,
+ searchSource: NewSearchSource().Query(NewMatchAllQuery()),
+ }
+ return builder
+}
+
+// Index sets the name of the index to use for scan.
+func (s *ScanService) Index(index string) *ScanService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices sets the names of the indices to use for scan.
+func (s *ScanService) Indices(indices ...string) *ScanService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Type restricts the scan to the given type.
+func (s *ScanService) Type(typ string) *ScanService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+// Types restricts the scan to the given types.
+func (s *ScanService) Types(types ...string) *ScanService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScanService) Scroll(keepAlive string) *ScanService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScanService) KeepAlive(keepAlive string) *ScanService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// Fields tells Elasticsearch to only load specific fields from a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html.
+func (s *ScanService) Fields(fields ...string) *ScanService {
+ s.searchSource = s.searchSource.Fields(fields...)
+ return s
+}
+
+// SearchSource sets the search source builder to use with this service.
+func (s *ScanService) SearchSource(searchSource *SearchSource) *ScanService {
+ s.searchSource = searchSource
+ if s.searchSource == nil {
+ s.searchSource = NewSearchSource().Query(NewMatchAllQuery())
+ }
+ return s
+}
+
+// Routing sets a comma-separated list of specific routing values.
+func (s *ScanService) Routing(routings ...string) *ScanService {
+ s.routing = strings.Join(routings, ",")
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: "random").
+func (s *ScanService) Preference(preference string) *ScanService {
+ s.preference = preference
+ return s
+}
+
+// Query sets the query to perform, e.g. MatchAllQuery.
+func (s *ScanService) Query(query Query) *ScanService {
+ s.searchSource = s.searchSource.Query(query)
+ return s
+}
+
+// PostFilter is executed as the last filter. It only affects the
+// search hits but not facets. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
+// for details.
+func (s *ScanService) PostFilter(postFilter Filter) *ScanService {
+ s.searchSource = s.searchSource.PostFilter(postFilter)
+ return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *ScanService) FetchSource(fetchSource bool) *ScanService {
+ s.searchSource = s.searchSource.FetchSource(fetchSource)
+ return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *ScanService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScanService {
+ s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
+ return s
+}
+
+// Version can be set to true to return a version for each search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
+func (s *ScanService) Version(version bool) *ScanService {
+ s.searchSource = s.searchSource.Version(version)
+ return s
+}
+
+// Sort the results by the given field, in the given order.
+// Use the alternative SortWithInfo to use a struct to define the sorting.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *ScanService) Sort(field string, ascending bool) *ScanService {
+ s.searchSource = s.searchSource.Sort(field, ascending)
+ return s
+}
+
+// SortWithInfo defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *ScanService) SortWithInfo(info SortInfo) *ScanService {
+ s.searchSource = s.searchSource.SortWithInfo(info)
+ return s
+}
+
+// SortBy defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *ScanService) SortBy(sorter ...Sorter) *ScanService {
+ s.searchSource = s.searchSource.SortBy(sorter...)
+ return s
+}
+
+// Pretty enables the caller to indent the JSON output.
+func (s *ScanService) Pretty(pretty bool) *ScanService {
+ s.pretty = pretty
+ return s
+}
+
+// Size is the number of results to return per shard, not per request.
+// So a size of 10 across 5 shards returns up to 50 results per
+// scan request.
+func (s *ScanService) Size(size int) *ScanService {
+ s.size = &size
+ return s
+}
+
+// Do executes the query and returns a "server-side cursor".
+func (s *ScanService) Do() (*ScanCursor, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_search"
+
+ // Parameters
+ params := make(url.Values)
+ if !s.searchSource.hasSort() {
+ params.Set("search_type", "scan")
+ }
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.keepAlive != "" {
+ params.Set("scroll", s.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+ if s.size != nil && *s.size > 0 {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+
+ // Get response
+ body := s.searchSource.Source()
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ searchResult := new(SearchResult)
+ if err := json.Unmarshal(res.Body, searchResult); err != nil {
+ return nil, err
+ }
+
+ cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult)
+
+ return cursor, nil
+}
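+
+// A sketch of starting a scan (the index name and size are assumptions
+// for the example); iteration then proceeds with the cursor's Next
+// method as documented below:
+//
+//   cursor, err := client.Scan("twitter").Size(100).Do()
+//   if err != nil {
+//       // Handle error
+//   }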
+
+// ScanCursor represents a single page of results from
+// an Elasticsearch Scan operation.
+type ScanCursor struct {
+ Results *SearchResult
+
+ client *Client
+ keepAlive string
+ pretty bool
+ currentPage int
+}
+
+// NewScanCursor returns a new initialized instance
+// of ScanCursor.
+func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor {
+ return &ScanCursor{
+ client: client,
+ keepAlive: keepAlive,
+ pretty: pretty,
+ Results: searchResult,
+ }
+}
+
+// TotalHits is a convenience method that returns the number
+// of hits the cursor will iterate through.
+func (c *ScanCursor) TotalHits() int64 {
+ if c.Results.Hits == nil {
+ return 0
+ }
+ return c.Results.Hits.TotalHits
+}
+
+// Next returns the next search result. It returns the error EOS
+// when all documents have been scanned.
+//
+// Usage:
+//
+// for {
+// res, err := cursor.Next()
+// if err == elastic.EOS {
+// // End of stream (or scan)
+// break
+// }
+// if err != nil {
+// // Handle error
+// }
+// // Work with res
+// }
+//
+func (c *ScanCursor) Next() (*SearchResult, error) {
+ if c.currentPage > 0 {
+ if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 {
+ return nil, EOS
+ }
+ }
+ if c.Results.ScrollId == "" {
+ return nil, EOS
+ }
+
+ // Build url
+ path := "/_search/scroll"
+
+ // Parameters
+ params := make(url.Values)
+ if c.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", c.pretty))
+ }
+ if c.keepAlive != "" {
+ params.Set("scroll", c.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+
+ // Set body
+ body := c.Results.ScrollId
+
+ // Get response
+ res, err := c.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ c.Results = &SearchResult{ScrollId: body}
+ if err := json.Unmarshal(res.Body, c.Results); err != nil {
+ return nil, err
+ }
+
+ c.currentPage++
+
+ return c.Results, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll.go
new file mode 100644
index 0000000..ddc3150
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/scroll.go
@@ -0,0 +1,219 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// ScrollService manages a cursor through documents in Elasticsearch.
+type ScrollService struct {
+ client *Client
+ indices []string
+ types []string
+ keepAlive string
+ query Query
+ size *int
+ pretty bool
+ scrollId string
+}
+
+// NewScrollService creates a new service to iterate through the
+// results of a query via a scroll cursor.
+func NewScrollService(client *Client) *ScrollService {
+ builder := &ScrollService{
+ client: client,
+ query: NewMatchAllQuery(),
+ }
+ return builder
+}
+
+func (s *ScrollService) Index(index string) *ScrollService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *ScrollService) Indices(indices ...string) *ScrollService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *ScrollService) Type(typ string) *ScrollService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, typ)
+ return s
+}
+
+func (s *ScrollService) Types(types ...string) *ScrollService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
+ s.keepAlive = keepAlive
+ return s
+}
+
+func (s *ScrollService) Query(query Query) *ScrollService {
+ s.query = query
+ return s
+}
+
+func (s *ScrollService) Pretty(pretty bool) *ScrollService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *ScrollService) Size(size int) *ScrollService {
+ s.size = &size
+ return s
+}
+
+// ScrollId sets the scroll id returned by a previous request, so that
+// Do fetches the next page of results.
+func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
+ s.scrollId = scrollId
+ return s
+}
+
+// Do executes the operation: the first call (without a scroll id)
+// returns the initial page; subsequent calls return the following pages.
+func (s *ScrollService) Do() (*SearchResult, error) {
+ if s.scrollId == "" {
+ return s.GetFirstPage()
+ }
+ return s.GetNextPage()
+}
+
+func (s *ScrollService) GetFirstPage() (*SearchResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ if len(indexPart) > 0 {
+ path += strings.Join(indexPart, ",")
+ }
+
+ // Types
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_search"
+
+ // Parameters
+ params := make(url.Values)
+ params.Set("search_type", "scan")
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.keepAlive != "" {
+ params.Set("scroll", s.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+ if s.size != nil && *s.size > 0 {
+ params.Set("size", fmt.Sprintf("%d", *s.size))
+ }
+
+ // Set body
+ body := make(map[string]interface{})
+ if s.query != nil {
+ body["query"] = s.query.Source()
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ searchResult := new(SearchResult)
+ if err := json.Unmarshal(res.Body, searchResult); err != nil {
+ return nil, err
+ }
+
+ return searchResult, nil
+}
+
+func (s *ScrollService) GetNextPage() (*SearchResult, error) {
+ if s.scrollId == "" {
+ return nil, EOS
+ }
+
+ // Build url
+ path := "/_search/scroll"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.keepAlive != "" {
+ params.Set("scroll", s.keepAlive)
+ } else {
+ params.Set("scroll", defaultKeepAlive)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, s.scrollId)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ searchResult := new(SearchResult)
+ if err := json.Unmarshal(res.Body, searchResult); err != nil {
+ return nil, err
+ }
+
+ // Determine last page
+ if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 {
+ return nil, EOS
+ }
+
+ return searchResult, nil
+}
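+
+// An illustrative sketch of paging through results with ScrollService,
+// assuming the client exposes a Scroll method analogous to Scan; the
+// index name and size are assumptions for the example:
+//
+//   svc := client.Scroll("twitter").Size(100)
+//   res, err := svc.Do() // first page
+//   for err == nil {
+//       // Work with res ...
+//       res, err = svc.ScrollId(res.ScrollId).Do()
+//   }
+//   if err != EOS {
+//       // Handle real error; EOS just signals the last page
+//   }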
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search.go
new file mode 100644
index 0000000..eefb492
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search.go
@@ -0,0 +1,539 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "reflect"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// Search for documents in Elasticsearch.
+type SearchService struct {
+ client *Client
+ searchSource *SearchSource
+ source interface{}
+ pretty bool
+ searchType string
+ indices []string
+ queryHint string
+ routing string
+ preference string
+ types []string
+}
+
+// NewSearchService creates a new service for searching in Elasticsearch.
+// You usually do not create the service yourself; instead, access it
+// via client.Search().
+func NewSearchService(client *Client) *SearchService {
+ builder := &SearchService{
+ client: client,
+ searchSource: NewSearchSource(),
+ }
+ return builder
+}
+
+// SearchSource sets the search source builder to use with this service.
+func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService {
+ s.searchSource = searchSource
+ if s.searchSource == nil {
+ s.searchSource = NewSearchSource()
+ }
+ return s
+}
+
+// Source allows the user to set the request body manually without using
+// any of the structs and interfaces in Elastic.
+func (s *SearchService) Source(source interface{}) *SearchService {
+ s.source = source
+ return s
+}
+
+// Index sets the name of the index to use for search.
+func (s *SearchService) Index(index string) *SearchService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, index)
+ return s
+}
+
+// Indices sets the names of the indices to use for search.
+func (s *SearchService) Indices(indices ...string) *SearchService {
+ if s.indices == nil {
+ s.indices = make([]string, 0)
+ }
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+// Type restricts the search for the given type.
+func (s *SearchService) Type(typ string) *SearchService {
+ if s.types == nil {
+ s.types = []string{typ}
+ } else {
+ s.types = append(s.types, typ)
+ }
+ return s
+}
+
+// Types restricts the search to the given types.
+func (s *SearchService) Types(types ...string) *SearchService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ }
+ s.types = append(s.types, types...)
+ return s
+}
+
+// Pretty enables the caller to indent the JSON output.
+func (s *SearchService) Pretty(pretty bool) *SearchService {
+ s.pretty = pretty
+ return s
+}
+
+// Timeout sets the timeout to use, e.g. "1s" or "1000ms".
+func (s *SearchService) Timeout(timeout string) *SearchService {
+ s.searchSource = s.searchSource.Timeout(timeout)
+ return s
+}
+
+// TimeoutInMillis sets the timeout in milliseconds.
+func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
+ s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis)
+ return s
+}
+
+// SearchType sets the search operation type. Valid values are:
+// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch",
+// "dfs_query_and_fetch", "count", "scan".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-search-type.html#search-request-search-type
+// for details.
+func (s *SearchService) SearchType(searchType string) *SearchService {
+ s.searchType = searchType
+ return s
+}
+
+// Routing sets a comma-separated list of specific routing values.
+func (s *SearchService) Routing(routings ...string) *SearchService {
+ s.routing = strings.Join(routings, ",")
+ return s
+}
+
+// Preference specifies the node or shard the operation should be
+// performed on (default: "random").
+func (s *SearchService) Preference(preference string) *SearchService {
+ s.preference = preference
+ return s
+}
+
+// QueryHint sets a hint for query execution.
+func (s *SearchService) QueryHint(queryHint string) *SearchService {
+ s.queryHint = queryHint
+ return s
+}
+
+// Query sets the query to perform, e.g. MatchAllQuery.
+func (s *SearchService) Query(query Query) *SearchService {
+ s.searchSource = s.searchSource.Query(query)
+ return s
+}
+
+// PostFilter is executed as the last filter. It only affects the
+// search hits but not facets. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
+// for details.
+func (s *SearchService) PostFilter(postFilter Filter) *SearchService {
+ s.searchSource = s.searchSource.PostFilter(postFilter)
+ return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchService) FetchSource(fetchSource bool) *SearchService {
+ s.searchSource = s.searchSource.FetchSource(fetchSource)
+ return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService {
+ s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
+ return s
+}
+
+// Highlight sets the highlighting. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+// for details.
+func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
+ s.searchSource = s.searchSource.Highlight(highlight)
+ return s
+}
+
+// GlobalSuggestText sets the global text for suggesters. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html#global-suggest
+// for details.
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
+ s.searchSource = s.searchSource.GlobalSuggestText(globalText)
+ return s
+}
+
+// Suggester sets the suggester. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html
+// for details.
+func (s *SearchService) Suggester(suggester Suggester) *SearchService {
+ s.searchSource = s.searchSource.Suggester(suggester)
+ return s
+}
+
+// Facet adds a facet to the search. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
+// to get an overview of Elasticsearch facets.
+func (s *SearchService) Facet(name string, facet Facet) *SearchService {
+ s.searchSource = s.searchSource.Facet(name, facet)
+ return s
+}
+
+// Aggregation adds an aggregation to the search. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
+// for an overview of aggregations in Elasticsearch.
+func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
+ s.searchSource = s.searchSource.Aggregation(name, aggregation)
+ return s
+}
+
+// MinScore excludes documents which have a score less than the minimum
+// specified here. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-min-score.html.
+func (s *SearchService) MinScore(minScore float64) *SearchService {
+ s.searchSource = s.searchSource.MinScore(minScore)
+ return s
+}
+
+// From defines the offset from the first result you want to fetch.
+// Use it in combination with Size to paginate through results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
+// for details.
+func (s *SearchService) From(from int) *SearchService {
+ s.searchSource = s.searchSource.From(from)
+ return s
+}
+
+// Size defines the maximum number of hits to be returned.
+// Use it in combination with From to paginate through results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
+// for details.
+func (s *SearchService) Size(size int) *SearchService {
+ s.searchSource = s.searchSource.Size(size)
+ return s
+}
+
+// Explain can be enabled to provide an explanation for each hit and how its
+// score was computed.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html
+// for details.
+func (s *SearchService) Explain(explain bool) *SearchService {
+ s.searchSource = s.searchSource.Explain(explain)
+ return s
+}
+
+// Version can be set to true to return a version for each search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
+func (s *SearchService) Version(version bool) *SearchService {
+ s.searchSource = s.searchSource.Version(version)
+ return s
+}
+
+// Sort the results by the given field, in the given order.
+// Use the alternative SortWithInfo to use a struct to define the sorting.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) Sort(field string, ascending bool) *SearchService {
+ s.searchSource = s.searchSource.Sort(field, ascending)
+ return s
+}
+
+// SortWithInfo defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
+ s.searchSource = s.searchSource.SortWithInfo(info)
+ return s
+}
+
+// SortBy defines how to sort results.
+// Use the Sort func for a shortcut.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
+// for detailed documentation of sorting.
+func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
+ s.searchSource = s.searchSource.SortBy(sorter...)
+ return s
+}
+
+// Fields tells Elasticsearch to only load specific fields from a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html.
+func (s *SearchService) Fields(fields ...string) *SearchService {
+ s.searchSource = s.searchSource.Fields(fields...)
+ return s
+}
+
+// Do executes the search and returns a SearchResult.
+func (s *SearchService) Do() (*SearchResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",")
+
+ // Types part
+ if len(s.types) > 0 {
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ typesPart = append(typesPart, typ)
+ }
+ path += "/"
+ path += strings.Join(typesPart, ",")
+ }
+
+ // Search
+ path += "/_search"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.searchType != "" {
+ params.Set("search_type", s.searchType)
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+
+ // Perform request
+ var body interface{}
+ if s.source != nil {
+ body = s.source
+ } else {
+ body = s.searchSource.Source()
+ }
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return search results
+ ret := new(SearchResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
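+
+// A minimal usage sketch (the index, query, and size are assumptions for
+// the example; NewTermQuery is defined elsewhere in this package):
+//
+//   res, err := client.Search().
+//       Index("twitter").
+//       Query(NewTermQuery("user", "olivere")).
+//       Size(10).
+//       Do()
+//   if err != nil {
+//       // Handle error
+//   }
+//   fmt.Printf("found %d hits\n", res.TotalHits())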
+
+// SearchResult is the result of a search in Elasticsearch.
+type SearchResult struct {
+ TookInMillis int64 `json:"took"` // search time in milliseconds
+ ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations
+ Hits *SearchHits `json:"hits"` // the actual search hits
+ Suggest SearchSuggest `json:"suggest"` // results from suggesters
+ Facets SearchFacets `json:"facets"` // results from facets
+ Aggregations Aggregations `json:"aggregations"` // results from aggregations
+ TimedOut bool `json:"timed_out"` // true if the search timed out
+ Error string `json:"error,omitempty"` // used in MultiSearch only
+}
+
+// TotalHits is a convenience function to return the number of hits for
+// a search result.
+func (r *SearchResult) TotalHits() int64 {
+ if r.Hits != nil {
+ return r.Hits.TotalHits
+ }
+ return 0
+}
+
+// Each is a utility function to iterate over all hits. It saves you from
+// checking for nil values. Notice that Each ignores errors when
+// unmarshalling the JSON of a hit.
+func (r *SearchResult) Each(typ reflect.Type) []interface{} {
+ if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
+ return nil
+ }
+ slice := make([]interface{}, 0)
+ for _, hit := range r.Hits.Hits {
+ v := reflect.New(typ).Elem()
+ if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
+ slice = append(slice, v.Interface())
+ }
+ }
+ return slice
+}
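+
+// For example, assuming a hypothetical Tweet struct that matches the
+// document source:
+//
+//   var ttyp Tweet
+//   for _, item := range res.Each(reflect.TypeOf(ttyp)) {
+//       t := item.(Tweet)
+//       // Work with t ...
+//   }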
+
+// SearchHits specifies the list of search hits.
+type SearchHits struct {
+ TotalHits int64 `json:"total"` // total number of hits found
+ MaxScore *float64 `json:"max_score"` // maximum score of all hits
+ Hits []*SearchHit `json:"hits"` // the actual hits returned
+}
+
+// SearchHit is a single hit.
+type SearchHit struct {
+ Score *float64 `json:"_score"` // computed score
+ Index string `json:"_index"` // index name
+ Id string `json:"_id"` // external or internal
+ Type string `json:"_type"` // type
+ Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService
+ Sort []interface{} `json:"sort"` // sort information
+ Highlight SearchHitHighlight `json:"highlight"` // highlighter information
+ Source *json.RawMessage `json:"_source"` // stored document source
+ Fields map[string]interface{} `json:"fields"` // returned fields
+ Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed
+ MatchedQueries []string `json:"matched_queries"` // matched queries
+ InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0
+
+ // Shard
+ // HighlightFields
+ // SortValues
+ // MatchedFilters
+}
+
+type SearchHitInnerHits struct {
+ Hits *SearchHits `json:"hits"`
+}
+
+// SearchExplanation explains how the score for a hit was computed.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html.
+type SearchExplanation struct {
+ Value float64 `json:"value"` // e.g. 1.0
+ Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:"
+ Details []SearchExplanation `json:"details,omitempty"` // recursive details
+}
+
+// Suggest
+
+// SearchSuggest is a map of suggestions.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggest map[string][]SearchSuggestion
+
+// SearchSuggestion is a single search suggestion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggestion struct {
+ Text string `json:"text"`
+ Offset int `json:"offset"`
+ Length int `json:"length"`
+ Options []SearchSuggestionOption `json:"options"`
+}
+
+// SearchSuggestionOption is an option of a SearchSuggestion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
+type SearchSuggestionOption struct {
+ Text string `json:"text"`
+ Score float32 `json:"score"`
+ Freq int `json:"freq"`
+ Payload interface{} `json:"payload"`
+}
+
+// Facets
+
+// SearchFacets is a map of facets.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
+type SearchFacets map[string]*SearchFacet
+
+// SearchFacet is a single facet.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
+type SearchFacet struct {
+ Type string `json:"_type"`
+ Missing int `json:"missing"`
+ Total int `json:"total"`
+ Other int `json:"other"`
+ Terms []searchFacetTerm `json:"terms"`
+ Ranges []searchFacetRange `json:"ranges"`
+ Entries []searchFacetEntry `json:"entries"`
+}
+
+// searchFacetTerm is the result of a terms/terms_stats facet.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-facet.html
+// and https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-stats-facet.html.
+type searchFacetTerm struct {
+ Term interface{} `json:"term"`
+ Count int `json:"count"`
+
+ // The following fields are returned for terms_stats facets.
+ // See https://www.elastic.co/guide/en/elasticsearch/reference/1.7/search-facets-terms-stats-facet.html.
+
+ TotalCount int `json:"total_count"`
+ Min float64 `json:"min"`
+ Max float64 `json:"max"`
+ Total float64 `json:"total"`
+ Mean float64 `json:"mean"`
+}
+
+// searchFacetRange is the result of a range facet.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-range-facet.html.
+type searchFacetRange struct {
+ From *float64 `json:"from"`
+ FromStr *string `json:"from_str"`
+ To *float64 `json:"to"`
+ ToStr *string `json:"to_str"`
+ Count int `json:"count"`
+ Min *float64 `json:"min"`
+ Max *float64 `json:"max"`
+ TotalCount int `json:"total_count"`
+ Total *float64 `json:"total"`
+ Mean *float64 `json:"mean"`
+}
+
+// searchFacetEntry is a general facet entry.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
+type searchFacetEntry struct {
+ // Key for this facet, e.g. in histograms
+ Key interface{} `json:"key"`
+ // Date histograms contain the number of milliseconds as date:
+ // If e.Time = 1293840000000, then: Time.at(1293840000000/1000) => 2011-01-01
+ Time int64 `json:"time"`
+ // Number of hits for this facet
+ Count int `json:"count"`
+ // Min is either a string like "Infinity" or a float64.
+ // This is returned with some DateHistogram facets.
+ Min interface{} `json:"min,omitempty"`
+ // Max is either a string like "-Infinity" or a float64
+ // This is returned with some DateHistogram facets.
+ Max interface{} `json:"max,omitempty"`
+ // Total is the sum of all entries on the recorded Time
+ // This is returned with some DateHistogram facets.
+ Total float64 `json:"total,omitempty"`
+ // TotalCount is the number of entries for Total
+ // This is returned with some DateHistogram facets.
+ TotalCount int `json:"total_count,omitempty"`
+ // Mean is the mean value
+ // This is returned with some DateHistogram facets.
+ Mean float64 `json:"mean,omitempty"`
+}
+
+// Aggregations (see search_aggs.go)
+
+// Highlighting
+
+// SearchHitHighlight is the highlight information of a search hit.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+// for a general discussion of highlighting.
+type SearchHitHighlight map[string][]string
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs.go
new file mode 100644
index 0000000..fb8b2c8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs.go
@@ -0,0 +1,960 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+// Aggregation is a unit of work that builds analytic information
+// over a set of documents. Aggregations are (in many senses) the
+// successor of facets in Elasticsearch.
+// For more details about aggregations, visit:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
+type Aggregation interface {
+ Source() interface{}
+}
+
+// Aggregations is a map of aggregation names to their raw results
+// in a search result.
+type Aggregations map[string]*json.RawMessage
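+
+// Results are accessed through the typed helper methods below. A sketch,
+// assuming the search added a terms aggregation named "by_user" and that
+// AggregationBucketKeyItems exposes its buckets as defined later in this
+// file:
+//
+//   agg, found := res.Aggregations.Terms("by_user")
+//   if found {
+//       for _, bucket := range agg.Buckets {
+//           // Work with bucket ...
+//       }
+//   }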
+
+// Min returns min aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Max returns max aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Sum returns sum aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
+func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Avg returns average aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ValueCount returns value-count aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Cardinality returns cardinality aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationValueMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Stats returns stats aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html
+func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationStatsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ExtendedStats returns extended stats aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationExtendedStatsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Percentiles returns percentiles results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html
+func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPercentilesMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// PercentileRanks returns percentile ranks results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationPercentilesMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// TopHits returns top-hits aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationTopHitsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Global returns global results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Filter returns filter results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Filters returns filters results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketFilters)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Missing returns missing results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html
+func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Nested returns nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html
+func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// ReverseNested returns reverse-nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html
+func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Children returns children results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationSingleBucket)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Terms returns terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
+func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// SignificantTerms returns significant terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketSignificantTerms)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Range returns range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// KeyedRange returns keyed range aggregation results.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html.
+func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyedRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// DateRange returns date range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// IPv4Range returns IPv4 range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html
+func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// Histogram returns histogram aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketHistogramItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// DateHistogram returns date histogram aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
+func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketHistogramItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoBounds returns geo-bounds aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
+func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationGeoBoundsMetric)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoHash returns geo-hash aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html
+func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketKeyItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// GeoDistance returns geo distance aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html
+func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) {
+ if raw, found := a[name]; found {
+ agg := new(AggregationBucketRangeItems)
+ if raw == nil {
+ return agg, true
+ }
+ if err := json.Unmarshal(*raw, agg); err == nil {
+ return agg, true
+ }
+ }
+ return nil, false
+}
+
+// -- Single value metric --
+
+// AggregationValueMetric is a single-value metric, returned e.g. by a
+// Min or Max aggregation.
+type AggregationValueMetric struct {
+ Aggregations
+
+ Value *float64 //`json:"value"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.
+func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["value"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Value)
+ }
+ a.Aggregations = aggs
+ return nil
+}
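+
+// A minimal decoding sketch (the payload is illustrative, not from a live
+// cluster); the custom unmarshaler above is invoked automatically:
+//
+//   var metric AggregationValueMetric
+//   if err := json.Unmarshal([]byte(`{"value": 75.0}`), &metric); err == nil && metric.Value != nil {
+//       fmt.Println(*metric.Value) // prints 75
+//   }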
+
+// -- Stats metric --
+
+// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation.
+type AggregationStatsMetric struct {
+ Aggregations
+
+ Count int64 // `json:"count"`
+ Min *float64 //`json:"min,omitempty"`
+ Max *float64 //`json:"max,omitempty"`
+ Avg *float64 //`json:"avg,omitempty"`
+ Sum *float64 //`json:"sum,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure.
+func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Count)
+ }
+ if v, ok := aggs["min"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Min)
+ }
+ if v, ok := aggs["max"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Max)
+ }
+ if v, ok := aggs["avg"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Avg)
+ }
+ if v, ok := aggs["sum"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Sum)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Extended stats metric --
+
+// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation.
+type AggregationExtendedStatsMetric struct {
+ Aggregations
+
+ Count int64 // `json:"count"`
+ Min *float64 //`json:"min,omitempty"`
+ Max *float64 //`json:"max,omitempty"`
+ Avg *float64 //`json:"avg,omitempty"`
+ Sum *float64 //`json:"sum,omitempty"`
+ SumOfSquares *float64 //`json:"sum_of_squares,omitempty"`
+ Variance *float64 //`json:"variance,omitempty"`
+ StdDeviation *float64 //`json:"std_deviation,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure.
+func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Count)
+ }
+ if v, ok := aggs["min"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Min)
+ }
+ if v, ok := aggs["max"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Max)
+ }
+ if v, ok := aggs["avg"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Avg)
+ }
+ if v, ok := aggs["sum"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Sum)
+ }
+ if v, ok := aggs["sum_of_squares"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfSquares)
+ }
+ if v, ok := aggs["variance"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Variance)
+ }
+ if v, ok := aggs["std_deviation"]; ok && v != nil {
+ json.Unmarshal(*v, &a.StdDeviation)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Percentiles metric --
+
+// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation.
+type AggregationPercentilesMetric struct {
+ Aggregations
+
+ Values map[string]float64 // `json:"values"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure.
+func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["values"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Values)
+ }
+ a.Aggregations = aggs
+ return nil
+}
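+
+// Values is keyed by the percentile rendered as a string, e.g. "50.0" or
+// "99.0". A reading sketch, assuming the Percentiles accessor defined with
+// the other accessors in this file; "load_time_outlier" is a hypothetical
+// aggregation name and res a *SearchResult:
+//
+//   if agg, found := res.Aggregations.Percentiles("load_time_outlier"); found {
+//       for percentile, value := range agg.Values {
+//           fmt.Printf("%s%% -> %v\n", percentile, value)
+//       }
+//   }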
+
+// -- Top-hits metric --
+
+// AggregationTopHitsMetric is a metric returned by a TopHits aggregation.
+type AggregationTopHitsMetric struct {
+ Aggregations
+
+ Hits *SearchHits //`json:"hits"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure.
+func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ a.Aggregations = aggs
+ a.Hits = new(SearchHits)
+ if v, ok := aggs["hits"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Hits)
+ }
+ return nil
+}
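+
+// A reading sketch, assuming the TopHits accessor defined with the other
+// accessors in this file; "top_tags_hits" is a hypothetical sub-aggregation
+// name on some enclosing bucket:
+//
+//   if th, found := bucket.TopHits("top_tags_hits"); found && th.Hits != nil {
+//       for _, hit := range th.Hits.Hits {
+//           _ = hit.Source // raw JSON of the matched document
+//       }
+//   }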
+
+// -- Geo-bounds metric --
+
+// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation.
+type AggregationGeoBoundsMetric struct {
+ Aggregations
+
+ Bounds struct {
+ TopLeft struct {
+ Latitude float64 `json:"lat"`
+ Longitude float64 `json:"lon"`
+ } `json:"top_left"`
+ BottomRight struct {
+ Latitude float64 `json:"lat"`
+ Longitude float64 `json:"lon"`
+ } `json:"bottom_right"`
+ } `json:"bounds"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure.
+func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["bounds"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Bounds)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Single bucket --
+
+// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global.
+type AggregationSingleBucket struct {
+ Aggregations
+
+ DocCount int64 // `json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure.
+func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
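+
+// Because Aggregations is embedded, sub-aggregations nested below the bucket
+// stay reachable. A sketch, assuming the Global and Avg accessors from this
+// file; "all_products" and "avg_price" are hypothetical aggregation names:
+//
+//   if global, found := res.Aggregations.Global("all_products"); found {
+//       if avg, found := global.Avg("avg_price"); found && avg.Value != nil {
+//           fmt.Println(*avg.Value)
+//       }
+//   }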
+
+// -- Bucket range items --
+
+// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned
+// with a range aggregation.
+type AggregationBucketRangeItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets []*AggregationBucketRangeItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
+func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned
+// with a keyed range aggregation.
+type AggregationBucketKeyedRangeItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure.
+func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.
+type AggregationBucketRangeItem struct {
+ Aggregations
+
+ Key string //`json:"key"`
+ DocCount int64 //`json:"doc_count"`
+ From *float64 //`json:"from"`
+ FromAsString string //`json:"from_as_string"`
+ To *float64 //`json:"to"`
+ ToAsString string //`json:"to_as_string"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.
+func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["from"]; ok && v != nil {
+ json.Unmarshal(*v, &a.From)
+ }
+ if v, ok := aggs["from_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.FromAsString)
+ }
+ if v, ok := aggs["to"]; ok && v != nil {
+ json.Unmarshal(*v, &a.To)
+ }
+ if v, ok := aggs["to_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.ToAsString)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket key items --
+
+// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned
+// with a terms aggregation.
+type AggregationBucketKeyItems struct {
+ Aggregations
+
+ DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"`
+ SumOfOtherDocCount int64 //`json:"sum_other_doc_count"`
+ Buckets []*AggregationBucketKeyItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure.
+func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCountErrorUpperBound)
+ }
+ if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.SumOfOtherDocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure.
+type AggregationBucketKeyItem struct {
+ Aggregations
+
+ Key interface{} //`json:"key"`
+ KeyNumber json.Number
+ DocCount int64 //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure.
+func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ if err := dec.Decode(&aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ json.Unmarshal(*v, &a.KeyNumber)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
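+
+// For numeric term keys, Key is decoded into an interface{} and therefore
+// arrives as a float64, which can lose precision on large integers;
+// KeyNumber keeps the exact textual form. A sketch, where terms is an
+// *AggregationBucketKeyItems returned by a terms aggregation:
+//
+//   for _, bucket := range terms.Buckets {
+//       id, err := bucket.KeyNumber.Int64() // exact, unlike bucket.Key
+//       _ = id
+//       _ = err
+//   }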
+
+// -- Bucket types for significant terms --
+
+// AggregationBucketSignificantTerms is a bucket aggregation returned
+// with a significant terms aggregation.
+type AggregationBucketSignificantTerms struct {
+ Aggregations
+
+ DocCount int64 //`json:"doc_count"`
+ Buckets []*AggregationBucketSignificantTerm //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure.
+func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure.
+type AggregationBucketSignificantTerm struct {
+ Aggregations
+
+ Key string //`json:"key"`
+ DocCount int64 //`json:"doc_count"`
+ BgCount int64 //`json:"bg_count"`
+ Score float64 //`json:"score"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure.
+func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ if v, ok := aggs["bg_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.BgCount)
+ }
+ if v, ok := aggs["score"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Score)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket filters --
+
+// AggregationBucketFilters is a multi-bucket aggregation that is returned
+// with a filters aggregation.
+type AggregationBucketFilters struct {
+ Aggregations
+
+ Buckets []*AggregationBucketKeyItem //`json:"buckets"`
+ NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure.
+func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ json.Unmarshal(*v, &a.NamedBuckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// -- Bucket histogram items --
+
+// AggregationBucketHistogramItems is a bucket aggregation that is returned
+// with a date histogram aggregation.
+type AggregationBucketHistogramItems struct {
+ Aggregations
+
+ Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.
+func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["buckets"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Buckets)
+ }
+ a.Aggregations = aggs
+ return nil
+}
+
+// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure.
+type AggregationBucketHistogramItem struct {
+ Aggregations
+
+ Key int64 //`json:"key"`
+ KeyAsString *string //`json:"key_as_string"`
+ DocCount int64 //`json:"doc_count"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.
+func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error {
+ var aggs map[string]*json.RawMessage
+ if err := json.Unmarshal(data, &aggs); err != nil {
+ return err
+ }
+ if v, ok := aggs["key"]; ok && v != nil {
+ json.Unmarshal(*v, &a.Key)
+ }
+ if v, ok := aggs["key_as_string"]; ok && v != nil {
+ json.Unmarshal(*v, &a.KeyAsString)
+ }
+ if v, ok := aggs["doc_count"]; ok && v != nil {
+ json.Unmarshal(*v, &a.DocCount)
+ }
+ a.Aggregations = aggs
+ return nil
+}
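+
+// A reading sketch using the DateHistogram accessor defined above;
+// "articles_over_time" is a hypothetical aggregation name and res a
+// *SearchResult:
+//
+//   if hist, found := res.Aggregations.DateHistogram("articles_over_time"); found {
+//       for _, bucket := range hist.Buckets {
+//           // Key is the interval start in epoch milliseconds; KeyAsString
+//           // is its formatted form when a format was requested.
+//           fmt.Println(bucket.Key, bucket.DocCount)
+//       }
+//   }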
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg.go
new file mode 100644
index 0000000..7b01ee0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_avg.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// AvgAggregation is a single-value metrics aggregation that computes
+// the average of numeric values that are extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+type AvgAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewAvgAggregation() AvgAggregation {
+ a := AvgAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a AvgAggregation) Field(field string) AvgAggregation {
+ a.field = field
+ return a
+}
+
+func (a AvgAggregation) Script(script string) AvgAggregation {
+ a.script = script
+ return a
+}
+
+func (a AvgAggregation) ScriptFile(scriptFile string) AvgAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a AvgAggregation) Lang(lang string) AvgAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a AvgAggregation) Format(format string) AvgAggregation {
+ a.format = format
+ return a
+}
+
+func (a AvgAggregation) Param(name string, value interface{}) AvgAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a AvgAggregation) SubAggregation(name string, subAggregation Aggregation) AvgAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a AvgAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "avg_grade" : { "avg" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "avg" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["avg"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
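+
+// A minimal usage sketch ("grade" is a hypothetical field):
+//
+//   agg := NewAvgAggregation().Field("grade")
+//   body, _ := json.Marshal(agg.Source())
+//   // string(body) == `{"avg":{"field":"grade"}}`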
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality.go
new file mode 100644
index 0000000..5d64134
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_cardinality.go
@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CardinalityAggregation is a single-value metrics aggregation that
+// calculates an approximate count of distinct values.
+// Values can be extracted either from specific fields in the document
+// or generated by a script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+type CardinalityAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ precisionThreshold *int64
+ rehash *bool
+}
+
+func NewCardinalityAggregation() CardinalityAggregation {
+ a := CardinalityAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a CardinalityAggregation) Field(field string) CardinalityAggregation {
+ a.field = field
+ return a
+}
+
+func (a CardinalityAggregation) Script(script string) CardinalityAggregation {
+ a.script = script
+ return a
+}
+
+func (a CardinalityAggregation) ScriptFile(scriptFile string) CardinalityAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a CardinalityAggregation) Lang(lang string) CardinalityAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a CardinalityAggregation) Format(format string) CardinalityAggregation {
+ a.format = format
+ return a
+}
+
+func (a CardinalityAggregation) Param(name string, value interface{}) CardinalityAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) CardinalityAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a CardinalityAggregation) PrecisionThreshold(threshold int64) CardinalityAggregation {
+ a.precisionThreshold = &threshold
+ return a
+}
+
+func (a CardinalityAggregation) Rehash(rehash bool) CardinalityAggregation {
+ a.rehash = &rehash
+ return a
+}
+
+func (a CardinalityAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "author_count" : {
+ // "cardinality" : { "field" : "author" }
+ // }
+ // }
+ // }
+ // This method returns only the "cardinality" : { "field" : "author" } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["cardinality"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+ if a.precisionThreshold != nil {
+ opts["precision_threshold"] = *a.precisionThreshold
+ }
+ if a.rehash != nil {
+ opts["rehash"] = *a.rehash
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
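+
+// A usage sketch; the precision threshold trades memory for accuracy, as
+// described in the Elasticsearch documentation linked above:
+//
+//   agg := NewCardinalityAggregation().
+//       Field("author").
+//       PrecisionThreshold(1000)
+//   // Source() yields {"cardinality":{"field":"author","precision_threshold":1000}}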
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children.go
new file mode 100644
index 0000000..f9cc918
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_children.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ChildrenAggregation is a special single bucket aggregation that enables
+// aggregating from buckets on parent document types to buckets on child documents.
+// It is available from 1.4.0.Beta1 upwards.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+type ChildrenAggregation struct {
+ typ string
+ subAggregations map[string]Aggregation
+}
+
+func NewChildrenAggregation() ChildrenAggregation {
+ a := ChildrenAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a ChildrenAggregation) Type(typ string) ChildrenAggregation {
+ a.typ = typ
+ return a
+}
+
+func (a ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) ChildrenAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a ChildrenAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "to-answers" : {
+ // "children" : {
+ // "type" : "answer"
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "children" : { "type" : ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["children"] = opts
+ opts["type"] = a.typ
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
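+
+// A usage sketch, assuming the TermsAggregation builder from this package;
+// "answer" is the child document type from the Elasticsearch documentation
+// example:
+//
+//   agg := NewChildrenAggregation().
+//       Type("answer").
+//       SubAggregation("top_names", NewTermsAggregation().Field("owner.display_name"))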
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram.go
new file mode 100644
index 0000000..9b593bd
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_histogram.go
@@ -0,0 +1,303 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// DateHistogramAggregation is a multi-bucket aggregation similar to the
+// histogram except it can only be applied on date values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
+type DateHistogramAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+
+ interval string
+ order string
+ orderAsc bool
+ minDocCount *int64
+ extendedBoundsMin interface{}
+ extendedBoundsMax interface{}
+ preZone string
+ postZone string
+ preZoneAdjustLargeInterval *bool
+ format string
+ preOffset int64
+ postOffset int64
+ factor *float32
+}
+
+func NewDateHistogramAggregation() DateHistogramAggregation {
+ a := DateHistogramAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a DateHistogramAggregation) Field(field string) DateHistogramAggregation {
+ a.field = field
+ return a
+}
+
+func (a DateHistogramAggregation) Script(script string) DateHistogramAggregation {
+ a.script = script
+ return a
+}
+
+func (a DateHistogramAggregation) ScriptFile(scriptFile string) DateHistogramAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a DateHistogramAggregation) Lang(lang string) DateHistogramAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a DateHistogramAggregation) Param(name string, value interface{}) DateHistogramAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) DateHistogramAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (a DateHistogramAggregation) Interval(interval string) DateHistogramAggregation {
+ a.interval = interval
+ return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a DateHistogramAggregation) Order(order string, asc bool) DateHistogramAggregation {
+ a.order = order
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) OrderByCount(asc bool) DateHistogramAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = "_count"
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) OrderByCountAsc() DateHistogramAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a DateHistogramAggregation) OrderByCountDesc() DateHistogramAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a DateHistogramAggregation) OrderByKey(asc bool) DateHistogramAggregation {
+ // "order" : { "_key" : "asc" }
+ a.order = "_key"
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) OrderByKeyAsc() DateHistogramAggregation {
+ return a.OrderByKey(true)
+}
+
+func (a DateHistogramAggregation) OrderByKeyDesc() DateHistogramAggregation {
+ return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued metric sub-aggregation.
+func (a DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) DateHistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName
+ a.orderAsc = asc
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a metric of a multi-valued sub-aggregation.
+func (a DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) DateHistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName + "." + metric
+ a.orderAsc = asc
+ return a
+}
+
+func (a DateHistogramAggregation) MinDocCount(minDocCount int64) DateHistogramAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a DateHistogramAggregation) PreZone(preZone string) DateHistogramAggregation {
+ a.preZone = preZone
+ return a
+}
+
+func (a DateHistogramAggregation) PostZone(postZone string) DateHistogramAggregation {
+ a.postZone = postZone
+ return a
+}
+
+func (a DateHistogramAggregation) PreZoneAdjustLargeInterval(preZoneAdjustLargeInterval bool) DateHistogramAggregation {
+ a.preZoneAdjustLargeInterval = &preZoneAdjustLargeInterval
+ return a
+}
+
+func (a DateHistogramAggregation) PreOffset(preOffset int64) DateHistogramAggregation {
+ a.preOffset = preOffset
+ return a
+}
+
+func (a DateHistogramAggregation) PostOffset(postOffset int64) DateHistogramAggregation {
+ a.postOffset = postOffset
+ return a
+}
+
+func (a DateHistogramAggregation) Factor(factor float32) DateHistogramAggregation {
+ a.factor = &factor
+ return a
+}
+
+func (a DateHistogramAggregation) Format(format string) DateHistogramAggregation {
+ a.format = format
+ return a
+}
+
+// ExtendedBoundsMin accepts int, int64, string, or time.Time values.
+func (a DateHistogramAggregation) ExtendedBoundsMin(min interface{}) DateHistogramAggregation {
+ a.extendedBoundsMin = min
+ return a
+}
+
+// ExtendedBoundsMax accepts int, int64, string, or time.Time values.
+func (a DateHistogramAggregation) ExtendedBoundsMax(max interface{}) DateHistogramAggregation {
+ a.extendedBoundsMax = max
+ return a
+}
+
+func (a DateHistogramAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "articles_over_time" : {
+ // "date_histogram" : {
+ // "field" : "date",
+ // "interval" : "month"
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "date_histogram" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["date_histogram"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ opts["interval"] = a.interval
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.order != "" {
+ o := make(map[string]interface{})
+ if a.orderAsc {
+ o[a.order] = "asc"
+ } else {
+ o[a.order] = "desc"
+ }
+ opts["order"] = o
+ }
+ if a.preZone != "" {
+ opts["pre_zone"] = a.preZone
+ }
+ if a.postZone != "" {
+ opts["post_zone"] = a.postZone
+ }
+ if a.preZoneAdjustLargeInterval != nil {
+ opts["pre_zone_adjust_large_interval"] = *a.preZoneAdjustLargeInterval
+ }
+ if a.preOffset != 0 {
+ opts["pre_offset"] = a.preOffset
+ }
+ if a.postOffset != 0 {
+ opts["post_offset"] = a.postOffset
+ }
+ if a.factor != nil {
+ opts["factor"] = *a.factor
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+ bounds := make(map[string]interface{})
+ if a.extendedBoundsMin != nil {
+ bounds["min"] = a.extendedBoundsMin
+ }
+ if a.extendedBoundsMax != nil {
+ bounds["max"] = a.extendedBoundsMax
+ }
+ opts["extended_bounds"] = bounds
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
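+
+// A usage sketch ("date" is a hypothetical field):
+//
+//   agg := NewDateHistogramAggregation().
+//       Field("date").
+//       Interval("month").
+//       Format("yyyy-MM-dd").
+//       MinDocCount(0)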
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range.go
new file mode 100644
index 0000000..c0c550e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_date_range.go
@@ -0,0 +1,243 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// DateRangeAggregation is a range aggregation that is dedicated for
+// date values. The main difference between this aggregation and the
+// normal range aggregation is that the from and to values can be expressed
+// in Date Math expressions, and it is also possible to specify a
+// date format by which the from and to response fields will be returned.
+// Note that this aggregation includes the from value and excludes the to
+// value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+type DateRangeAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ keyed *bool
+ unmapped *bool
+ format string
+ entries []DateRangeAggregationEntry
+}
+
+type DateRangeAggregationEntry struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewDateRangeAggregation() DateRangeAggregation {
+ a := DateRangeAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]DateRangeAggregationEntry, 0),
+ }
+ return a
+}
+
+func (a DateRangeAggregation) Field(field string) DateRangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a DateRangeAggregation) Script(script string) DateRangeAggregation {
+ a.script = script
+ return a
+}
+
+func (a DateRangeAggregation) ScriptFile(scriptFile string) DateRangeAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a DateRangeAggregation) Lang(lang string) DateRangeAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a DateRangeAggregation) Param(name string, value interface{}) DateRangeAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) DateRangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a DateRangeAggregation) Keyed(keyed bool) DateRangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a DateRangeAggregation) Unmapped(unmapped bool) DateRangeAggregation {
+ a.unmapped = &unmapped
+ return a
+}
+
+func (a DateRangeAggregation) Format(format string) DateRangeAggregation {
+ a.format = format
+ return a
+}
+
+func (a DateRangeAggregation) AddRange(from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedTo(from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedFrom(to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) Lt(to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) LtWithKey(key string, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) Between(from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a DateRangeAggregation) Gt(from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) GtWithKey(key string, from interface{}) DateRangeAggregation {
+ a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a DateRangeAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "range" : {
+ // "date_range": {
+ // "field": "date",
+ // "format": "MM-yyy",
+ // "ranges": [
+ // { "to": "now-10M/M" },
+ // { "from": "now-10M/M" }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "date_range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["date_range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+ if a.unmapped != nil {
+ opts["unmapped"] = *a.unmapped
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
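+
+// A usage sketch, reproducing the two ranges from the example above:
+//
+//   agg := NewDateRangeAggregation().
+//       Field("date").
+//       Format("MM-yyy").
+//       AddUnboundedFrom("now-10M/M"). // emits { "to": "now-10M/M" }
+//       AddUnboundedTo("now-10M/M")    // emits { "from": "now-10M/M" }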
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats.go
new file mode 100644
index 0000000..76cd572
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_extended_stats.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedStatsAggregation is a multi-value metrics aggregation that
+// computes stats over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+type ExtendedStatsAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewExtendedStatsAggregation() ExtendedStatsAggregation {
+ a := ExtendedStatsAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a ExtendedStatsAggregation) Field(field string) ExtendedStatsAggregation {
+ a.field = field
+ return a
+}
+
+func (a ExtendedStatsAggregation) Script(script string) ExtendedStatsAggregation {
+ a.script = script
+ return a
+}
+
+func (a ExtendedStatsAggregation) ScriptFile(scriptFile string) ExtendedStatsAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a ExtendedStatsAggregation) Lang(lang string) ExtendedStatsAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a ExtendedStatsAggregation) Format(format string) ExtendedStatsAggregation {
+ a.format = format
+ return a
+}
+
+func (a ExtendedStatsAggregation) Param(name string, value interface{}) ExtendedStatsAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) ExtendedStatsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a ExtendedStatsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_stats" : { "extended_stats" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "extended_stats" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["extended_stats"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
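+
+// A minimal usage sketch ("grade" is a hypothetical field):
+//
+//   agg := NewExtendedStatsAggregation().Field("grade")
+//   // Source() yields {"extended_stats":{"field":"grade"}}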
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter.go
new file mode 100644
index 0000000..d165f35
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filter.go
@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FilterAggregation defines a single bucket of all the documents
+// in the current document set context that match a specified filter.
+// Often this will be used to narrow down the current aggregation context
+// to a specific set of documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+type FilterAggregation struct {
+ filter Filter
+ subAggregations map[string]Aggregation
+}
+
+func NewFilterAggregation() FilterAggregation {
+ a := FilterAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a FilterAggregation) SubAggregation(name string, subAggregation Aggregation) FilterAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a FilterAggregation) Filter(filter Filter) FilterAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a FilterAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "in_stock_products" : {
+ // "filter" : { "range" : { "stock" : { "gt" : 0 } } }
+ // }
+ // }
+ // }
+ // This method returns only the { "filter" : {} } part.
+
+ source := make(map[string]interface{})
+ source["filter"] = a.filter.Source()
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
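+
+// A usage sketch, assuming the RangeFilter builder from this package,
+// mirroring the in-stock example above:
+//
+//   agg := NewFilterAggregation().
+//       Filter(NewRangeFilter("stock").Gt(0)).
+//       SubAggregation("avg_price", NewAvgAggregation().Field("price"))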
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters.go
new file mode 100644
index 0000000..81da4cc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_filters.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FiltersAggregation defines a multi-bucket aggregation where each bucket
+// is associated with a filter. Each bucket will collect all documents that
+// match its associated filter.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+type FiltersAggregation struct {
+ filters []Filter
+ subAggregations map[string]Aggregation
+}
+
+func NewFiltersAggregation() FiltersAggregation {
+ return FiltersAggregation{
+ filters: make([]Filter, 0),
+ subAggregations: make(map[string]Aggregation),
+ }
+}
+
+func (a FiltersAggregation) Filter(filter Filter) FiltersAggregation {
+ a.filters = append(a.filters, filter)
+ return a
+}
+
+func (a FiltersAggregation) Filters(filters ...Filter) FiltersAggregation {
+ if len(filters) > 0 {
+ a.filters = append(a.filters, filters...)
+ }
+ return a
+}
+
+func (a FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) FiltersAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a FiltersAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "messages" : {
+ // "filters" : {
+ // "filters" : {
+ // "errors" : { "term" : { "body" : "error" }},
+ // "warnings" : { "term" : { "body" : "warning" }}
+ // }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the (outer) { "filters" : {} } part.
+
+ source := make(map[string]interface{})
+ filters := make(map[string]interface{})
+ source["filters"] = filters
+
+ arr := make([]interface{}, len(a.filters))
+ for i, filter := range a.filters {
+ arr[i] = filter.Source()
+ }
+ filters["filters"] = arr
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
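+
+// Note that Source() emits the anonymous (array) form of the filters body,
+// so responses arrive in AggregationBucketFilters.Buckets rather than in
+// NamedBuckets. A sketch, assuming the TermFilter builder from this package:
+//
+//   agg := NewFiltersAggregation().Filters(
+//       NewTermFilter("body", "error"),
+//       NewTermFilter("body", "warning"),
+//   )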
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds.go
new file mode 100644
index 0000000..33d9eb9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_bounds.go
@@ -0,0 +1,104 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoBoundsAggregation is a metric aggregation that computes the
+// bounding box containing all geo_point values for a field.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
+type GeoBoundsAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ wrapLongitude *bool
+}
+
+func NewGeoBoundsAggregation() GeoBoundsAggregation {
+ a := GeoBoundsAggregation{}
+ return a
+}
+
+func (a GeoBoundsAggregation) Field(field string) GeoBoundsAggregation {
+ a.field = field
+ return a
+}
+
+func (a GeoBoundsAggregation) Script(script string) GeoBoundsAggregation {
+ a.script = script
+ return a
+}
+
+func (a GeoBoundsAggregation) ScriptFile(scriptFile string) GeoBoundsAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a GeoBoundsAggregation) Lang(lang string) GeoBoundsAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a GeoBoundsAggregation) Params(params map[string]interface{}) GeoBoundsAggregation {
+ a.params = params
+ return a
+}
+
+func (a GeoBoundsAggregation) Param(name string, value interface{}) GeoBoundsAggregation {
+ if a.params == nil {
+ a.params = make(map[string]interface{})
+ }
+ a.params[name] = value
+ return a
+}
+
+func (a GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) GeoBoundsAggregation {
+ a.wrapLongitude = &wrapLongitude
+ return a
+}
+
+func (a GeoBoundsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "query" : {
+ // "match" : { "business_type" : "shop" }
+ // },
+ // "aggs" : {
+ // "viewport" : {
+ // "geo_bounds" : {
+ // "field" : "location",
+ // "wrap_longitude" : true
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "geo_bounds" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["geo_bounds"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+ if a.wrapLongitude != nil {
+ opts["wrap_longitude"] = *a.wrapLongitude
+ }
+
+ return source
+}
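+
+// A usage sketch ("location" is a hypothetical geo_point field):
+//
+//   agg := NewGeoBoundsAggregation().
+//       Field("location").
+//       WrapLongitude(true)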
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance.go
new file mode 100644
index 0000000..d63af53
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_geo_distance.go
@@ -0,0 +1,180 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
+// and conceptually works very similarly to the range aggregation.
+// The user can define a point of origin and a set of distance range buckets.
+// The aggregation evaluates the distance of each document value from
+// the origin point and determines the buckets it belongs to based on
+// the ranges (a document belongs to a bucket if the distance between the
+// document and the origin falls within the distance range of the bucket).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
+type GeoDistanceAggregation struct {
+ field string
+ unit string
+ distanceType string
+ point string
+ ranges []geoDistAggRange
+ subAggregations map[string]Aggregation
+}
+
+type geoDistAggRange struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewGeoDistanceAggregation() GeoDistanceAggregation {
+ a := GeoDistanceAggregation{
+ subAggregations: make(map[string]Aggregation),
+ ranges: make([]geoDistAggRange, 0),
+ }
+ return a
+}
+
+func (a GeoDistanceAggregation) Field(field string) GeoDistanceAggregation {
+ a.field = field
+ return a
+}
+
+func (a GeoDistanceAggregation) Unit(unit string) GeoDistanceAggregation {
+ a.unit = unit
+ return a
+}
+
+func (a GeoDistanceAggregation) DistanceType(distanceType string) GeoDistanceAggregation {
+ a.distanceType = distanceType
+ return a
+}
+
+func (a GeoDistanceAggregation) Point(latLon string) GeoDistanceAggregation {
+ a.point = latLon
+ return a
+}
+
+func (a GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) GeoDistanceAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a GeoDistanceAggregation) AddRange(from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedTo(from float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedFrom(to float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) Between(from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) GeoDistanceAggregation {
+ a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+ return a
+}
+
+func (a GeoDistanceAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "rings_around_amsterdam" : {
+ // "geo_distance" : {
+ // "field" : "location",
+ // "origin" : "52.3760, 4.894",
+ // "ranges" : [
+ // { "to" : 100 },
+ // { "from" : 100, "to" : 300 },
+ // { "from" : 300 }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "geo_distance" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["geo_distance"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.unit != "" {
+ opts["unit"] = a.unit
+ }
+ if a.distanceType != "" {
+ opts["distance_type"] = a.distanceType
+ }
+ if a.point != "" {
+ opts["origin"] = a.point
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range a.ranges {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["from"] = from
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case *int, *int16, *int32, *int64, *float32, *float64:
+ r["to"] = to
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
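+
+// A usage sketch, reproducing the rings example above:
+//
+//   agg := NewGeoDistanceAggregation().
+//       Field("location").
+//       Point("52.3760, 4.894").
+//       AddUnboundedFrom(100). // { "to": 100 }
+//       AddRange(100, 300).    // { "from": 100, "to": 300 }
+//       AddUnboundedTo(300)    // { "from": 300 }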
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global.go
new file mode 100644
index 0000000..4d56297
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_global.go
@@ -0,0 +1,56 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GlobalAggregation defines a single bucket of all the documents within
+// the search execution context. This context is defined by the indices
+// and the document types you’re searching on, but is not influenced
+// by the search query itself.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+type GlobalAggregation struct {
+ subAggregations map[string]Aggregation
+}
+
+func NewGlobalAggregation() GlobalAggregation {
+ a := GlobalAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) GlobalAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a GlobalAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "all_products" : {
+ // "global" : {},
+ // "aggs" : {
+ // "avg_price" : { "avg" : { "field" : "price" } }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "global" : {} } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["global"] = opts
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
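+
+// Usage sketch (not part of the original file; "client", the index name and
+// the term query are illustrative assumptions; NewAvgAggregation comes from
+// search_aggs_avg.go in this package). The global bucket ignores the query:
+//
+//	agg := NewGlobalAggregation().
+//		SubAggregation("avg_price", NewAvgAggregation().Field("price"))
+//	res, err := client.Search().Index("products").
+//		Query(NewTermQuery("type", "t-shirt")).
+//		Aggregation("all_products", agg).Do()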
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram.go
new file mode 100644
index 0000000..250d3f7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_histogram.go
@@ -0,0 +1,234 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HistogramAggregation is a multi-bucket values source based aggregation
+// that can be applied on numeric values extracted from the documents.
+// It dynamically builds fixed size (a.k.a. interval) buckets over the
+// values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+type HistogramAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+
+ interval int64
+ order string
+ orderAsc bool
+ minDocCount *int64
+ extendedBoundsMin *int64
+ extendedBoundsMax *int64
+}
+
+func NewHistogramAggregation() HistogramAggregation {
+ a := HistogramAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a HistogramAggregation) Field(field string) HistogramAggregation {
+ a.field = field
+ return a
+}
+
+func (a HistogramAggregation) Script(script string) HistogramAggregation {
+ a.script = script
+ return a
+}
+
+func (a HistogramAggregation) ScriptFile(scriptFile string) HistogramAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a HistogramAggregation) Lang(lang string) HistogramAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a HistogramAggregation) Param(name string, value interface{}) HistogramAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) HistogramAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a HistogramAggregation) Interval(interval int64) HistogramAggregation {
+ a.interval = interval
+ return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a HistogramAggregation) Order(order string, asc bool) HistogramAggregation {
+ a.order = order
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) OrderByCount(asc bool) HistogramAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = "_count"
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) OrderByCountAsc() HistogramAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a HistogramAggregation) OrderByCountDesc() HistogramAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a HistogramAggregation) OrderByKey(asc bool) HistogramAggregation {
+ // "order" : { "_key" : "asc" }
+ a.order = "_key"
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) OrderByKeyAsc() HistogramAggregation {
+ return a.OrderByKey(true)
+}
+
+func (a HistogramAggregation) OrderByKeyDesc() HistogramAggregation {
+ return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued sub-aggregation.
+func (a HistogramAggregation) OrderByAggregation(aggName string, asc bool) HistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName
+ a.orderAsc = asc
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a metric of a multi-valued sub-aggregation.
+func (a HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) HistogramAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName + "." + metric
+ a.orderAsc = asc
+ return a
+}
+
+func (a HistogramAggregation) MinDocCount(minDocCount int64) HistogramAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a HistogramAggregation) ExtendedBoundsMin(min int64) HistogramAggregation {
+ a.extendedBoundsMin = &min
+ return a
+}
+
+func (a HistogramAggregation) ExtendedBoundsMax(max int64) HistogramAggregation {
+ a.extendedBoundsMax = &max
+ return a
+}
+
+func (a HistogramAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "prices" : {
+ // "histogram" : {
+ // "field" : "price",
+ // "interval" : 50
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "histogram" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["histogram"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	// Also emit script_file; otherwise the ScriptFile setter above would be
+	// silently ignored when this aggregation is serialized.
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ opts["interval"] = a.interval
+ if a.order != "" {
+ o := make(map[string]interface{})
+ if a.orderAsc {
+ o[a.order] = "asc"
+ } else {
+ o[a.order] = "desc"
+ }
+ opts["order"] = o
+ }
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+ bounds := make(map[string]interface{})
+ if a.extendedBoundsMin != nil {
+			bounds["min"] = *a.extendedBoundsMin
+ }
+ if a.extendedBoundsMax != nil {
+			bounds["max"] = *a.extendedBoundsMax
+ }
+ opts["extended_bounds"] = bounds
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
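+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions):
+//
+//	agg := NewHistogramAggregation().
+//		Field("price").
+//		Interval(50).
+//		MinDocCount(1).
+//		OrderByKeyAsc()
+//	res, err := client.Search().Index("products").
+//		Aggregation("prices", agg).Do()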
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max.go
new file mode 100644
index 0000000..9e77ef7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_max.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MaxAggregation is a single-value metrics aggregation that keeps track of
+// and returns the maximum value among the numeric values extracted from
+// the aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+type MaxAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewMaxAggregation() MaxAggregation {
+ a := MaxAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a MaxAggregation) Field(field string) MaxAggregation {
+ a.field = field
+ return a
+}
+
+func (a MaxAggregation) Script(script string) MaxAggregation {
+ a.script = script
+ return a
+}
+
+func (a MaxAggregation) ScriptFile(scriptFile string) MaxAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a MaxAggregation) Lang(lang string) MaxAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a MaxAggregation) Format(format string) MaxAggregation {
+ a.format = format
+ return a
+}
+
+func (a MaxAggregation) Param(name string, value interface{}) MaxAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a MaxAggregation) SubAggregation(name string, subAggregation Aggregation) MaxAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a MaxAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "max_price" : { "max" : { "field" : "price" } }
+ // }
+ // }
+ // This method returns only the { "max" : { "field" : "price" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["max"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
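+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions):
+//
+//	agg := NewMaxAggregation().Field("price")
+//	res, err := client.Search().Index("products").
+//		Aggregation("max_price", agg).Do()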
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min.go
new file mode 100644
index 0000000..9e00bd3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_min.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinAggregation is a single-value metrics aggregation that keeps track of
+// and returns the minimum value among the numeric values extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by a
+// provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+type MinAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewMinAggregation() MinAggregation {
+ a := MinAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a MinAggregation) Field(field string) MinAggregation {
+ a.field = field
+ return a
+}
+
+func (a MinAggregation) Script(script string) MinAggregation {
+ a.script = script
+ return a
+}
+
+func (a MinAggregation) ScriptFile(scriptFile string) MinAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a MinAggregation) Lang(lang string) MinAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a MinAggregation) Format(format string) MinAggregation {
+ a.format = format
+ return a
+}
+
+func (a MinAggregation) Param(name string, value interface{}) MinAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a MinAggregation) SubAggregation(name string, subAggregation Aggregation) MinAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a MinAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "min_price" : { "min" : { "field" : "price" } }
+ // }
+ // }
+ // This method returns only the { "min" : { "field" : "price" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["min"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
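+
+// Usage sketch (not part of the original file). Instead of a field, a
+// script can derive the value; the script source, its language and "client"
+// are illustrative assumptions:
+//
+//	agg := NewMinAggregation().
+//		Script("doc['price'].value * 0.8").
+//		Lang("groovy")
+//	res, err := client.Search().Index("products").
+//		Aggregation("min_discounted_price", agg).Do()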
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing.go
new file mode 100644
index 0000000..4e0f526
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_missing.go
@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MissingAggregation is a field data based single bucket aggregation,
+// that creates a bucket of all documents in the current document set context
+// that are missing a field value (effectively, missing a field or having
+// the configured NULL value set). This aggregator will often be used in
+// conjunction with other field data bucket aggregators (such as ranges)
+// to return information for all the documents that could not be placed
+// in any of the other buckets due to missing field data values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html
+type MissingAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+}
+
+func NewMissingAggregation() MissingAggregation {
+ a := MissingAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a MissingAggregation) Field(field string) MissingAggregation {
+ a.field = field
+ return a
+}
+
+func (a MissingAggregation) SubAggregation(name string, subAggregation Aggregation) MissingAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a MissingAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "products_without_a_price" : {
+ // "missing" : { "field" : "price" }
+ // }
+ // }
+ // }
+ // This method returns only the { "missing" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["missing"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
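+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions):
+//
+//	agg := NewMissingAggregation().Field("price")
+//	res, err := client.Search().Index("products").
+//		Aggregation("products_without_a_price", agg).Do()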
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested.go
new file mode 100644
index 0000000..feab5be
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_nested.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// NestedAggregation is a special single bucket aggregation that enables
+// aggregating nested documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html
+type NestedAggregation struct {
+ path string
+ subAggregations map[string]Aggregation
+}
+
+func NewNestedAggregation() NestedAggregation {
+ a := NestedAggregation{
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a NestedAggregation) SubAggregation(name string, subAggregation Aggregation) NestedAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a NestedAggregation) Path(path string) NestedAggregation {
+ a.path = path
+ return a
+}
+
+func (a NestedAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "query" : {
+ // "match" : { "name" : "led tv" }
+	//   },
+ // "aggs" : {
+ // "resellers" : {
+ // "nested" : {
+ // "path" : "resellers"
+ // },
+ // "aggs" : {
+ // "min_price" : { "min" : { "field" : "resellers.price" } }
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "nested" : {} } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["nested"] = opts
+
+ opts["path"] = a.path
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
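+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions; NewMinAggregation is the metric builder from
+// search_aggs_min.go in this package):
+//
+//	agg := NewNestedAggregation().
+//		Path("resellers").
+//		SubAggregation("min_price", NewMinAggregation().Field("resellers.price"))
+//	res, err := client.Search().Index("products").
+//		Aggregation("resellers", agg).Do()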
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks.go
new file mode 100644
index 0000000..7e058d5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentile_ranks.go
@@ -0,0 +1,141 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentileRanksAggregation is a multi-value metrics aggregation that
+// calculates, for each of the provided values, the percentage of observed
+// values that fall at or below it.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+type PercentileRanksAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ values []float64
+ compression *float64
+ estimator string
+}
+
+func NewPercentileRanksAggregation() PercentileRanksAggregation {
+ a := PercentileRanksAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ values: make([]float64, 0),
+ }
+ return a
+}
+
+func (a PercentileRanksAggregation) Field(field string) PercentileRanksAggregation {
+ a.field = field
+ return a
+}
+
+func (a PercentileRanksAggregation) Script(script string) PercentileRanksAggregation {
+ a.script = script
+ return a
+}
+
+func (a PercentileRanksAggregation) ScriptFile(scriptFile string) PercentileRanksAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a PercentileRanksAggregation) Lang(lang string) PercentileRanksAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a PercentileRanksAggregation) Format(format string) PercentileRanksAggregation {
+ a.format = format
+ return a
+}
+
+func (a PercentileRanksAggregation) Param(name string, value interface{}) PercentileRanksAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) PercentileRanksAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a PercentileRanksAggregation) Values(values ...float64) PercentileRanksAggregation {
+ a.values = make([]float64, 0)
+ a.values = append(a.values, values...)
+ return a
+}
+
+func (a PercentileRanksAggregation) Compression(compression float64) PercentileRanksAggregation {
+ a.compression = &compression
+ return a
+}
+
+func (a PercentileRanksAggregation) Estimator(estimator string) PercentileRanksAggregation {
+ a.estimator = estimator
+ return a
+}
+
+func (a PercentileRanksAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "load_time_outlier" : {
+ // "percentile_ranks" : {
+	//             "field" : "load_time",
+ // "values" : [15, 30]
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the
+ // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["percentile_ranks"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+ if len(a.values) > 0 {
+ opts["values"] = a.values
+ }
+ if a.compression != nil {
+ opts["compression"] = *a.compression
+ }
+ if a.estimator != "" {
+ opts["estimator"] = a.estimator
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
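+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions):
+//
+//	agg := NewPercentileRanksAggregation().
+//		Field("load_time").
+//		Values(15, 30)
+//	res, err := client.Search().Index("logs").
+//		Aggregation("load_time_outlier", agg).Do()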
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles.go
new file mode 100644
index 0000000..5b6cff9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_percentiles.go
@@ -0,0 +1,140 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentilesAggregation is a multi-value metrics aggregation that
+// calculates one or more percentiles over numeric values extracted from
+// the aggregated documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html
+type PercentilesAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ percentiles []float64
+ compression *float64
+ estimator string
+}
+
+func NewPercentilesAggregation() PercentilesAggregation {
+ a := PercentilesAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ percentiles: make([]float64, 0),
+ }
+ return a
+}
+
+func (a PercentilesAggregation) Field(field string) PercentilesAggregation {
+ a.field = field
+ return a
+}
+
+func (a PercentilesAggregation) Script(script string) PercentilesAggregation {
+ a.script = script
+ return a
+}
+
+func (a PercentilesAggregation) ScriptFile(scriptFile string) PercentilesAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a PercentilesAggregation) Lang(lang string) PercentilesAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a PercentilesAggregation) Format(format string) PercentilesAggregation {
+ a.format = format
+ return a
+}
+
+func (a PercentilesAggregation) Param(name string, value interface{}) PercentilesAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) PercentilesAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a PercentilesAggregation) Percentiles(percentiles ...float64) PercentilesAggregation {
+ a.percentiles = make([]float64, 0)
+ a.percentiles = append(a.percentiles, percentiles...)
+ return a
+}
+
+func (a PercentilesAggregation) Compression(compression float64) PercentilesAggregation {
+ a.compression = &compression
+ return a
+}
+
+func (a PercentilesAggregation) Estimator(estimator string) PercentilesAggregation {
+ a.estimator = estimator
+ return a
+}
+
+func (a PercentilesAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "load_time_outlier" : {
+ // "percentiles" : {
+ // "field" : "load_time"
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the
+ // { "percentiles" : { "field" : "load_time" } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["percentiles"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+ if len(a.percentiles) > 0 {
+ opts["percents"] = a.percentiles
+ }
+ if a.compression != nil {
+ opts["compression"] = *a.compression
+ }
+ if a.estimator != "" {
+ opts["estimator"] = a.estimator
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
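+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions). Percentiles sets the "percents" to report and
+// Compression trades memory for accuracy:
+//
+//	agg := NewPercentilesAggregation().
+//		Field("load_time").
+//		Percentiles(50, 95, 99).
+//		Compression(200)
+//	res, err := client.Search().Index("logs").
+//		Aggregation("load_time_outlier", agg).Do()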
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range.go
new file mode 100644
index 0000000..5b05423
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_range.go
@@ -0,0 +1,232 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// RangeAggregation is a multi-bucket value source based aggregation that
+// enables the user to define a set of ranges - each representing a bucket.
+// During the aggregation process, the values extracted from each document
+// are checked against each bucket range, and matching documents are placed
+// into the corresponding bucket. Note that this aggregation includes the
+// from value and excludes the to value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+type RangeAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+ keyed *bool
+ unmapped *bool
+ entries []rangeAggregationEntry
+}
+
+type rangeAggregationEntry struct {
+ Key string
+ From interface{}
+ To interface{}
+}
+
+func NewRangeAggregation() RangeAggregation {
+ a := RangeAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ entries: make([]rangeAggregationEntry, 0),
+ }
+ return a
+}
+
+func (a RangeAggregation) Field(field string) RangeAggregation {
+ a.field = field
+ return a
+}
+
+func (a RangeAggregation) Script(script string) RangeAggregation {
+ a.script = script
+ return a
+}
+
+func (a RangeAggregation) ScriptFile(scriptFile string) RangeAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a RangeAggregation) Lang(lang string) RangeAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a RangeAggregation) Param(name string, value interface{}) RangeAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a RangeAggregation) SubAggregation(name string, subAggregation Aggregation) RangeAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a RangeAggregation) Keyed(keyed bool) RangeAggregation {
+ a.keyed = &keyed
+ return a
+}
+
+func (a RangeAggregation) Unmapped(unmapped bool) RangeAggregation {
+ a.unmapped = &unmapped
+ return a
+}
+
+func (a RangeAggregation) AddRange(from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) AddRangeWithKey(key string, from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedTo(from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedFrom(to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) Lt(to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) LtWithKey(key string, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+ return a
+}
+
+func (a RangeAggregation) Between(from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) BetweenWithKey(key string, from, to interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+ return a
+}
+
+func (a RangeAggregation) Gt(from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) GtWithKey(key string, from interface{}) RangeAggregation {
+ a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+ return a
+}
+
+func (a RangeAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "price_ranges" : {
+ // "range" : {
+ // "field" : "price",
+ // "ranges" : [
+ // { "to" : 50 },
+ // { "from" : 50, "to" : 100 },
+ // { "from" : 100 }
+ // ]
+ // }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the { "range" : { ... } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["range"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ if a.keyed != nil {
+ opts["keyed"] = *a.keyed
+ }
+ if a.unmapped != nil {
+ opts["unmapped"] = *a.unmapped
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range a.entries {
+ r := make(map[string]interface{})
+ if ent.Key != "" {
+ r["key"] = ent.Key
+ }
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
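+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions). Keyed buckets make the response addressable by
+// name instead of by position:
+//
+//	agg := NewRangeAggregation().
+//		Field("price").
+//		LtWithKey("cheap", 50).
+//		BetweenWithKey("average", 50, 100).
+//		GtWithKey("expensive", 100).
+//		Keyed(true)
+//	res, err := client.Search().Index("products").
+//		Aggregation("price_ranges", agg).Do()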
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms.go
new file mode 100644
index 0000000..0308223
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_significant_terms.go
@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SignificantTermsAggregation is an aggregation that returns interesting
+// or unusual occurrences of terms in a set.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+type SignificantTermsAggregation struct {
+ field string
+ subAggregations map[string]Aggregation
+
+ minDocCount *int
+ shardMinDocCount *int
+ requiredSize *int
+ shardSize *int
+ filter Filter
+ executionHint string
+}
+
+func NewSignificantTermsAggregation() SignificantTermsAggregation {
+ a := SignificantTermsAggregation{
+		subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a SignificantTermsAggregation) Field(field string) SignificantTermsAggregation {
+ a.field = field
+ return a
+}
+
+func (a SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) SignificantTermsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a SignificantTermsAggregation) MinDocCount(minDocCount int) SignificantTermsAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) SignificantTermsAggregation {
+ a.shardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a SignificantTermsAggregation) RequiredSize(requiredSize int) SignificantTermsAggregation {
+ a.requiredSize = &requiredSize
+ return a
+}
+
+func (a SignificantTermsAggregation) ShardSize(shardSize int) SignificantTermsAggregation {
+ a.shardSize = &shardSize
+ return a
+}
+
+func (a SignificantTermsAggregation) BackgroundFilter(filter Filter) SignificantTermsAggregation {
+ a.filter = filter
+ return a
+}
+
+func (a SignificantTermsAggregation) ExecutionHint(hint string) SignificantTermsAggregation {
+ a.executionHint = hint
+ return a
+}
+
+func (a SignificantTermsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "query" : {
+ // "terms" : {"force" : [ "British Transport Police" ]}
+ // },
+ // "aggregations" : {
+ // "significantCrimeTypes" : {
+ // "significant_terms" : { "field" : "crime_type" }
+ // }
+ // }
+ // }
+ //
+ // This method returns only the
+	// { "significant_terms" : { "field" : "crime_type" } }
+ // part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["significant_terms"] = opts
+
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.requiredSize != nil {
+ opts["size"] = *a.requiredSize // not a typo!
+ }
+ if a.shardSize != nil {
+ opts["shard_size"] = *a.shardSize
+ }
+ if a.minDocCount != nil {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.shardMinDocCount != nil {
+ opts["shard_min_doc_count"] = *a.shardMinDocCount
+ }
+ if a.filter != nil {
+ opts["background_filter"] = a.filter.Source()
+ }
+ if a.executionHint != "" {
+ opts["execution_hint"] = a.executionHint
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
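+
+// Usage sketch (not part of the original file; "client", the index and the
+// terms query are illustrative assumptions):
+//
+//	agg := NewSignificantTermsAggregation().
+//		Field("crime_type").
+//		MinDocCount(10)
+//	res, err := client.Search().Index("crimes").
+//		Query(NewTermsQuery("force", "British Transport Police")).
+//		Aggregation("significantCrimeTypes", agg).Do()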
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats.go
new file mode 100644
index 0000000..2bc6b27
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_stats.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// StatsAggregation is a multi-value metrics aggregation that computes stats
+// over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html
+type StatsAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewStatsAggregation() StatsAggregation {
+ a := StatsAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a StatsAggregation) Field(field string) StatsAggregation {
+ a.field = field
+ return a
+}
+
+func (a StatsAggregation) Script(script string) StatsAggregation {
+ a.script = script
+ return a
+}
+
+func (a StatsAggregation) ScriptFile(scriptFile string) StatsAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a StatsAggregation) Lang(lang string) StatsAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a StatsAggregation) Format(format string) StatsAggregation {
+ a.format = format
+ return a
+}
+
+func (a StatsAggregation) Param(name string, value interface{}) StatsAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a StatsAggregation) SubAggregation(name string, subAggregation Aggregation) StatsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a StatsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_stats" : { "stats" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "stats" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["stats"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
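+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions). A single stats aggregation returns min, max,
+// sum, count and avg for the field:
+//
+//	agg := NewStatsAggregation().Field("grade")
+//	res, err := client.Search().Index("exams").
+//		Aggregation("grades_stats", agg).Do()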
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum.go
new file mode 100644
index 0000000..2aaee60
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_sum.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SumAggregation is a single-value metrics aggregation that sums up
+// numeric values that are extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
+type SumAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewSumAggregation() SumAggregation {
+ a := SumAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a SumAggregation) Field(field string) SumAggregation {
+ a.field = field
+ return a
+}
+
+func (a SumAggregation) Script(script string) SumAggregation {
+ a.script = script
+ return a
+}
+
+func (a SumAggregation) ScriptFile(scriptFile string) SumAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a SumAggregation) Lang(lang string) SumAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a SumAggregation) Format(format string) SumAggregation {
+ a.format = format
+ return a
+}
+
+func (a SumAggregation) Param(name string, value interface{}) SumAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a SumAggregation) SubAggregation(name string, subAggregation Aggregation) SumAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a SumAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "intraday_return" : { "sum" : { "field" : "change" } }
+ // }
+ // }
+ // This method returns only the { "sum" : { "field" : "change" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["sum"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
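+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions):
+//
+//	agg := NewSumAggregation().Field("change")
+//	res, err := client.Search().Index("stock").
+//		Aggregation("intraday_return", agg).Do()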
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms.go
new file mode 100644
index 0000000..d38c066
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_terms.go
@@ -0,0 +1,339 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermsAggregation is a multi-bucket value source based aggregation
+// where buckets are dynamically built - one per unique value.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
+type TermsAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+
+ size *int
+ shardSize *int
+ requiredSize *int
+ minDocCount *int
+ shardMinDocCount *int
+ valueType string
+ order string
+ orderAsc bool
+ includePattern string
+ includeFlags *int
+ excludePattern string
+ excludeFlags *int
+ executionHint string
+ collectionMode string
+ showTermDocCountError *bool
+ includeTerms []string
+ excludeTerms []string
+}
+
+func NewTermsAggregation() TermsAggregation {
+ a := TermsAggregation{
+ params: make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+ includeTerms: make([]string, 0),
+ excludeTerms: make([]string, 0),
+ }
+ return a
+}
+
+func (a TermsAggregation) Field(field string) TermsAggregation {
+ a.field = field
+ return a
+}
+
+func (a TermsAggregation) Script(script string) TermsAggregation {
+ a.script = script
+ return a
+}
+
+func (a TermsAggregation) ScriptFile(scriptFile string) TermsAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a TermsAggregation) Lang(lang string) TermsAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a TermsAggregation) Param(name string, value interface{}) TermsAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a TermsAggregation) SubAggregation(name string, subAggregation Aggregation) TermsAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a TermsAggregation) Size(size int) TermsAggregation {
+ a.size = &size
+ return a
+}
+
+func (a TermsAggregation) RequiredSize(requiredSize int) TermsAggregation {
+ a.requiredSize = &requiredSize
+ return a
+}
+
+func (a TermsAggregation) ShardSize(shardSize int) TermsAggregation {
+ a.shardSize = &shardSize
+ return a
+}
+
+func (a TermsAggregation) MinDocCount(minDocCount int) TermsAggregation {
+ a.minDocCount = &minDocCount
+ return a
+}
+
+func (a TermsAggregation) ShardMinDocCount(shardMinDocCount int) TermsAggregation {
+ a.shardMinDocCount = &shardMinDocCount
+ return a
+}
+
+func (a TermsAggregation) Include(regexp string) TermsAggregation {
+ a.includePattern = regexp
+ return a
+}
+
+func (a TermsAggregation) IncludeWithFlags(regexp string, flags int) TermsAggregation {
+ a.includePattern = regexp
+ a.includeFlags = &flags
+ return a
+}
+
+func (a TermsAggregation) Exclude(regexp string) TermsAggregation {
+ a.excludePattern = regexp
+ return a
+}
+
+func (a TermsAggregation) ExcludeWithFlags(regexp string, flags int) TermsAggregation {
+ a.excludePattern = regexp
+ a.excludeFlags = &flags
+ return a
+}
+
+// ValueType can be string, long, or double.
+func (a TermsAggregation) ValueType(valueType string) TermsAggregation {
+ a.valueType = valueType
+ return a
+}
+
+func (a TermsAggregation) Order(order string, asc bool) TermsAggregation {
+ a.order = order
+ a.orderAsc = asc
+ return a
+}
+
+func (a TermsAggregation) OrderByCount(asc bool) TermsAggregation {
+ // "order" : { "_count" : "asc" }
+ a.order = "_count"
+ a.orderAsc = asc
+ return a
+}
+
+func (a TermsAggregation) OrderByCountAsc() TermsAggregation {
+ return a.OrderByCount(true)
+}
+
+func (a TermsAggregation) OrderByCountDesc() TermsAggregation {
+ return a.OrderByCount(false)
+}
+
+func (a TermsAggregation) OrderByTerm(asc bool) TermsAggregation {
+ // "order" : { "_term" : "asc" }
+ a.order = "_term"
+ a.orderAsc = asc
+ return a
+}
+
+func (a TermsAggregation) OrderByTermAsc() TermsAggregation {
+ return a.OrderByTerm(true)
+}
+
+func (a TermsAggregation) OrderByTermDesc() TermsAggregation {
+ return a.OrderByTerm(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued sub-aggregation.
+func (a TermsAggregation) OrderByAggregation(aggName string, asc bool) TermsAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "avg_height" : "desc" }
+ // },
+ // "aggs" : {
+ // "avg_height" : { "avg" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName
+ a.orderAsc = asc
+ return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a metric of a multi-valued sub-aggregation.
+func (a TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) TermsAggregation {
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : {
+ // "field" : "gender",
+ // "order" : { "height_stats.avg" : "desc" }
+ // },
+ // "aggs" : {
+ // "height_stats" : { "stats" : { "field" : "height" } }
+ // }
+ // }
+ // }
+ // }
+ a.order = aggName + "." + metric
+ a.orderAsc = asc
+ return a
+}
+
+func (a TermsAggregation) ExecutionHint(hint string) TermsAggregation {
+ a.executionHint = hint
+ return a
+}
+
+// CollectionMode can be "depth_first" or "breadth_first" as of Elasticsearch 1.4.0.
+func (a TermsAggregation) CollectionMode(collectionMode string) TermsAggregation {
+ a.collectionMode = collectionMode
+ return a
+}
+
+func (a TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) TermsAggregation {
+ a.showTermDocCountError = &showTermDocCountError
+ return a
+}
+
+func (a TermsAggregation) IncludeTerms(terms ...string) TermsAggregation {
+ a.includeTerms = append(a.includeTerms, terms...)
+ return a
+}
+
+func (a TermsAggregation) ExcludeTerms(terms ...string) TermsAggregation {
+ a.excludeTerms = append(a.excludeTerms, terms...)
+ return a
+}
+
+func (a TermsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "genders" : {
+ // "terms" : { "field" : "gender" }
+ // }
+ // }
+ // }
+ // This method returns only the { "terms" : { "field" : "gender" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["terms"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ // TermsBuilder
+ if a.size != nil && *a.size >= 0 {
+ opts["size"] = *a.size
+ }
+ if a.shardSize != nil && *a.shardSize >= 0 {
+ opts["shard_size"] = *a.shardSize
+ }
+ if a.requiredSize != nil && *a.requiredSize >= 0 {
+ opts["required_size"] = *a.requiredSize
+ }
+ if a.minDocCount != nil && *a.minDocCount >= 0 {
+ opts["min_doc_count"] = *a.minDocCount
+ }
+ if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 {
+ opts["shard_min_doc_count"] = *a.shardMinDocCount
+ }
+ if a.showTermDocCountError != nil {
+ opts["show_term_doc_count_error"] = *a.showTermDocCountError
+ }
+ if a.collectionMode != "" {
+ opts["collect_mode"] = a.collectionMode
+ }
+ if a.valueType != "" {
+ opts["value_type"] = a.valueType
+ }
+ if a.order != "" {
+ o := make(map[string]interface{})
+ if a.orderAsc {
+ o[a.order] = "asc"
+ } else {
+ o[a.order] = "desc"
+ }
+ opts["order"] = o
+ }
+ if len(a.includeTerms) > 0 {
+ opts["include"] = a.includeTerms
+ }
+ if a.includePattern != "" {
+ if a.includeFlags == nil || *a.includeFlags == 0 {
+ opts["include"] = a.includePattern
+ } else {
+ p := make(map[string]interface{})
+ p["pattern"] = a.includePattern
+ p["flags"] = *a.includeFlags
+ opts["include"] = p
+ }
+ }
+ if len(a.excludeTerms) > 0 {
+ opts["exclude"] = a.excludeTerms
+ }
+ if a.excludePattern != "" {
+ if a.excludeFlags == nil || *a.excludeFlags == 0 {
+ opts["exclude"] = a.excludePattern
+ } else {
+ p := make(map[string]interface{})
+ p["pattern"] = a.excludePattern
+ p["flags"] = *a.excludeFlags
+ opts["exclude"] = p
+ }
+ }
+ if a.executionHint != "" {
+ opts["execution_hint"] = a.executionHint
+ }
+ return source
+}
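+
+// Usage sketch (not part of the original file; "client" and the names are
+// illustrative assumptions; NewAvgAggregation comes from search_aggs_avg.go
+// in this package). Buckets are ordered by the avg_height sub-aggregation:
+//
+//	agg := NewTermsAggregation().
+//		Field("gender").
+//		Size(10).
+//		OrderByAggregation("avg_height", false).
+//		SubAggregation("avg_height", NewAvgAggregation().Field("height"))
+//	res, err := client.Search().Index("people").
+//		Aggregation("genders", agg).Do()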
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits.go
new file mode 100644
index 0000000..4930463
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_tophits.go
@@ -0,0 +1,150 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TopHitsAggregation keeps track of the most relevant document
+// being aggregated. This aggregator is intended to be used as a
+// sub aggregator, so that the top matching documents
+// can be aggregated per bucket.
+//
+// It can effectively be used to group result sets by certain fields via
+// a bucket aggregator. One or more bucket aggregators determine the
+// properties by which the result set is sliced.
+//
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+type TopHitsAggregation struct {
+ searchSource *SearchSource
+}
+
+func NewTopHitsAggregation() TopHitsAggregation {
+ a := TopHitsAggregation{
+ searchSource: NewSearchSource(),
+ }
+ return a
+}
+
+func (a TopHitsAggregation) From(from int) TopHitsAggregation {
+ a.searchSource = a.searchSource.From(from)
+ return a
+}
+
+func (a TopHitsAggregation) Size(size int) TopHitsAggregation {
+ a.searchSource = a.searchSource.Size(size)
+ return a
+}
+
+func (a TopHitsAggregation) TrackScores(trackScores bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.TrackScores(trackScores)
+ return a
+}
+
+func (a TopHitsAggregation) Explain(explain bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.Explain(explain)
+ return a
+}
+
+func (a TopHitsAggregation) Version(version bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.Version(version)
+ return a
+}
+
+func (a TopHitsAggregation) NoFields() TopHitsAggregation {
+ a.searchSource = a.searchSource.NoFields()
+ return a
+}
+
+func (a TopHitsAggregation) FetchSource(fetchSource bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.FetchSource(fetchSource)
+ return a
+}
+
+func (a TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) TopHitsAggregation {
+ a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
+ return a
+}
+
+func (a TopHitsAggregation) FieldDataFields(fieldDataFields ...string) TopHitsAggregation {
+ a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...)
+ return a
+}
+
+func (a TopHitsAggregation) FieldDataField(fieldDataField string) TopHitsAggregation {
+ a.searchSource = a.searchSource.FieldDataField(fieldDataField)
+ return a
+}
+
+func (a TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) TopHitsAggregation {
+ a.searchSource = a.searchSource.ScriptFields(scriptFields...)
+ return a
+}
+
+func (a TopHitsAggregation) ScriptField(scriptField *ScriptField) TopHitsAggregation {
+ a.searchSource = a.searchSource.ScriptField(scriptField)
+ return a
+}
+
+func (a TopHitsAggregation) PartialFields(partialFields ...*PartialField) TopHitsAggregation {
+ a.searchSource = a.searchSource.PartialFields(partialFields...)
+ return a
+}
+
+func (a TopHitsAggregation) PartialField(partialField *PartialField) TopHitsAggregation {
+ a.searchSource = a.searchSource.PartialField(partialField)
+ return a
+}
+
+func (a TopHitsAggregation) Sort(field string, ascending bool) TopHitsAggregation {
+ a.searchSource = a.searchSource.Sort(field, ascending)
+ return a
+}
+
+func (a TopHitsAggregation) SortWithInfo(info SortInfo) TopHitsAggregation {
+ a.searchSource = a.searchSource.SortWithInfo(info)
+ return a
+}
+
+func (a TopHitsAggregation) SortBy(sorter ...Sorter) TopHitsAggregation {
+ a.searchSource = a.searchSource.SortBy(sorter...)
+ return a
+}
+
+func (a TopHitsAggregation) Highlight(highlight *Highlight) TopHitsAggregation {
+ a.searchSource = a.searchSource.Highlight(highlight)
+ return a
+}
+
+func (a TopHitsAggregation) Highlighter() *Highlight {
+ return a.searchSource.Highlighter()
+}
+
+func (a TopHitsAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs": {
+ // "top_tag_hits": {
+ // "top_hits": {
+ // "sort": [
+ // {
+ // "last_activity_date": {
+ // "order": "desc"
+ // }
+ // }
+ // ],
+ // "_source": {
+ // "include": [
+ // "title"
+ // ]
+ // },
+ // "size" : 1
+ // }
+ // }
+ // }
+ // }
+ // This method returns only the { "top_hits" : { ... } } part.
+
+ source := make(map[string]interface{})
+ source["top_hits"] = a.searchSource.Source()
+ return source
+}
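+
+// Usage sketch (not part of the original file; "client", the names and the
+// NewFetchSourceContext(...).Include(...) call are illustrative
+// assumptions). The top_hits aggregation is nested under a terms bucket,
+// mirroring the JSON example above:
+//
+//	top := NewTopHitsAggregation().
+//		Sort("last_activity_date", false).
+//		FetchSourceContext(NewFetchSourceContext(true).Include("title")).
+//		Size(1)
+//	agg := NewTermsAggregation().Field("tags").
+//		SubAggregation("top_tag_hits", top)
+//	res, err := client.Search().Index("posts").
+//		Aggregation("top-tags", agg).Do()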
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count.go
new file mode 100644
index 0000000..b38d783
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_aggs_value_count.go
@@ -0,0 +1,111 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ValueCountAggregation is a single-value metrics aggregation that counts
+// the number of values that are extracted from the aggregated documents.
+// These values can be extracted either from specific fields in the documents,
+// or be generated by a provided script. Typically, this aggregator will be
+// used in conjunction with other single-value aggregations.
+// For example, when computing the avg one might be interested in the
+// number of values the average is computed over.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+type ValueCountAggregation struct {
+ field string
+ script string
+ scriptFile string
+ lang string
+ format string
+ params map[string]interface{}
+ subAggregations map[string]Aggregation
+}
+
+func NewValueCountAggregation() ValueCountAggregation {
+ a := ValueCountAggregation{
+ params: make(map[string]interface{}),
+ subAggregations: make(map[string]Aggregation),
+ }
+ return a
+}
+
+func (a ValueCountAggregation) Field(field string) ValueCountAggregation {
+ a.field = field
+ return a
+}
+
+func (a ValueCountAggregation) Script(script string) ValueCountAggregation {
+ a.script = script
+ return a
+}
+
+func (a ValueCountAggregation) ScriptFile(scriptFile string) ValueCountAggregation {
+ a.scriptFile = scriptFile
+ return a
+}
+
+func (a ValueCountAggregation) Lang(lang string) ValueCountAggregation {
+ a.lang = lang
+ return a
+}
+
+func (a ValueCountAggregation) Format(format string) ValueCountAggregation {
+ a.format = format
+ return a
+}
+
+func (a ValueCountAggregation) Param(name string, value interface{}) ValueCountAggregation {
+ a.params[name] = value
+ return a
+}
+
+func (a ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) ValueCountAggregation {
+ a.subAggregations[name] = subAggregation
+ return a
+}
+
+func (a ValueCountAggregation) Source() interface{} {
+ // Example:
+ // {
+ // "aggs" : {
+ // "grades_count" : { "value_count" : { "field" : "grade" } }
+ // }
+ // }
+ // This method returns only the { "value_count" : { "field" : "grade" } } part.
+
+ source := make(map[string]interface{})
+ opts := make(map[string]interface{})
+ source["value_count"] = opts
+
+ // ValuesSourceAggregationBuilder
+ if a.field != "" {
+ opts["field"] = a.field
+ }
+ if a.script != "" {
+ opts["script"] = a.script
+ }
+ if a.scriptFile != "" {
+ opts["script_file"] = a.scriptFile
+ }
+ if a.lang != "" {
+ opts["lang"] = a.lang
+ }
+ if a.format != "" {
+ opts["format"] = a.format
+ }
+ if len(a.params) > 0 {
+ opts["params"] = a.params
+ }
+
+ // AggregationBuilder (SubAggregations)
+ if len(a.subAggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ source["aggregations"] = aggsMap
+ for name, aggregate := range a.subAggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ }
+
+ return source
+}
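+
+// Example usage (a minimal sketch; the field name is illustrative):
+//
+//    agg := NewValueCountAggregation().Field("grade")
+//    src := agg.Source() // the {"value_count": {"field": "grade"}} fragment shown above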
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets.go
new file mode 100644
index 0000000..2e69980
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets.go
@@ -0,0 +1,12 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Facet represents a glimpse into the data.
+// For more details about facets, visit:
+// http://elasticsearch.org/guide/reference/api/search/facets/
+type Facet interface {
+ Source() interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram.go
new file mode 100644
index 0000000..b13d27e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_date_histogram.go
@@ -0,0 +1,198 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A histogram facet that works with date field types, extending
+// the regular histogram facet with date-specific options.
+// See:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-date-histogram-facet.html
+type DateHistogramFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField *string
+ interval string
+ preZone string
+ preZoneAdjustLargeInterval *bool
+ postZone string
+ preOffset string
+ postOffset string
+ factor *float32
+ comparatorType string
+ valueScript string
+ params map[string]interface{}
+ lang string
+}
+
+func NewDateHistogramFacet() DateHistogramFacet {
+ return DateHistogramFacet{
+ params: make(map[string]interface{}),
+ }
+}
+
+func (f DateHistogramFacet) FacetFilter(filter Facet) DateHistogramFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f DateHistogramFacet) Global(global bool) DateHistogramFacet {
+ f.global = &global
+ return f
+}
+
+func (f DateHistogramFacet) Nested(nested string) DateHistogramFacet {
+ f.nested = nested
+ return f
+}
+
+func (f DateHistogramFacet) Mode(mode string) DateHistogramFacet {
+ f.mode = mode
+ return f
+}
+
+func (f DateHistogramFacet) Field(field string) DateHistogramFacet {
+ f.keyField = field
+ return f
+}
+
+func (f DateHistogramFacet) KeyField(keyField string) DateHistogramFacet {
+ f.keyField = keyField
+ return f
+}
+
+func (f DateHistogramFacet) ValueField(valueField string) DateHistogramFacet {
+ f.valueField = &valueField
+ return f
+}
+
+func (f DateHistogramFacet) ValueScript(valueScript string) DateHistogramFacet {
+ f.valueScript = valueScript
+ return f
+}
+
+func (f DateHistogramFacet) Param(name string, value interface{}) DateHistogramFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f DateHistogramFacet) Lang(lang string) DateHistogramFacet {
+ f.lang = lang
+ return f
+}
+
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (f DateHistogramFacet) Interval(interval string) DateHistogramFacet {
+ f.interval = interval
+ return f
+}
+
+func (f DateHistogramFacet) PreZoneAdjustLargeInterval(preZoneAdjustLargeInterval bool) DateHistogramFacet {
+ f.preZoneAdjustLargeInterval = &preZoneAdjustLargeInterval
+ return f
+}
+
+func (f DateHistogramFacet) PreZone(preZone string) DateHistogramFacet {
+ f.preZone = preZone
+ return f
+}
+
+func (f DateHistogramFacet) PostZone(postZone string) DateHistogramFacet {
+ f.postZone = postZone
+ return f
+}
+
+func (f DateHistogramFacet) PreOffset(preOffset string) DateHistogramFacet {
+ f.preOffset = preOffset
+ return f
+}
+
+func (f DateHistogramFacet) PostOffset(postOffset string) DateHistogramFacet {
+ f.postOffset = postOffset
+ return f
+}
+
+func (f DateHistogramFacet) Factor(factor float32) DateHistogramFacet {
+ f.factor = &factor
+ return f
+}
+
+func (f DateHistogramFacet) Comparator(comparator string) DateHistogramFacet {
+ f.comparatorType = comparator
+ return f
+}
+
+func (f DateHistogramFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f DateHistogramFacet) Source() interface{} {
+ /*
+ "histo1" : {
+ "date_histogram" : {
+ "field" : "field_name",
+ "interval" : "day"
+ }
+ }
+ */
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ facet := make(map[string]interface{})
+ source["date_histogram"] = facet
+
+ if f.valueField != nil {
+ facet["key_field"] = f.keyField
+ facet["value_field"] = *f.valueField
+ } else {
+ facet["field"] = f.keyField
+ }
+
+ if f.valueScript != "" {
+ facet["value_script"] = f.valueScript
+ if f.lang != "" {
+ facet["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ facet["params"] = f.params
+ }
+ }
+ facet["interval"] = f.interval
+ if f.preZone != "" {
+ facet["pre_zone"] = f.preZone
+ }
+ if f.preZoneAdjustLargeInterval != nil {
+ facet["pre_zone_adjust_large_interval"] = *f.preZoneAdjustLargeInterval
+ }
+ if f.postZone != "" {
+ facet["post_zone"] = f.postZone
+ }
+ if f.preOffset != "" {
+ facet["pre_offset"] = f.preOffset
+ }
+ if f.postOffset != "" {
+ facet["post_offset"] = f.postOffset
+ }
+ if f.factor != nil {
+ facet["factor"] = *f.factor
+ }
+ if f.comparatorType != "" {
+ facet["comparator"] = f.comparatorType
+ }
+ return source
+}
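+
+// Example usage (a minimal sketch; the field name is illustrative):
+//
+//    f := NewDateHistogramFacet().Field("timestamp").Interval("day")
+//    src := f.Source() // the {"date_histogram": ...} fragment shown above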
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter.go
new file mode 100644
index 0000000..1b5719d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_filter.go
@@ -0,0 +1,68 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter facet (not to be confused with a facet filter) allows you
+// to return a count of the hits matching the filter.
+// The filter itself can be expressed using the Query DSL.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-filter-facet.html
+type FilterFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ filter Filter
+}
+
+func NewFilterFacet() FilterFacet {
+ return FilterFacet{}
+}
+
+func (f FilterFacet) FacetFilter(filter Facet) FilterFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f FilterFacet) Global(global bool) FilterFacet {
+ f.global = &global
+ return f
+}
+
+func (f FilterFacet) Nested(nested string) FilterFacet {
+ f.nested = nested
+ return f
+}
+
+func (f FilterFacet) Mode(mode string) FilterFacet {
+ f.mode = mode
+ return f
+}
+
+func (f FilterFacet) Filter(filter Filter) FilterFacet {
+ f.filter = filter
+ return f
+}
+
+func (f FilterFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f FilterFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ source["filter"] = f.filter.Source()
+ return source
+}
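+
+// Example usage (a minimal sketch; ExistsFilter is defined in
+// search_filters_exists.go, and the field name is illustrative):
+//
+//    f := NewFilterFacet().Filter(NewExistsFilter("user"))
+//    src := f.Source() // {"filter": {"exists": {"field": "user"}}}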
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance.go
new file mode 100644
index 0000000..faa3ee7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_geo_distance.go
@@ -0,0 +1,202 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The geo_distance facet provides information for ranges of distances
+// from a provided geo_point, including the count of hits that fall
+// within each range and aggregated data (like totals).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-geo-distance-facet.html
+type GeoDistanceFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ fieldName string
+ valueFieldName string
+ lat float64
+ lon float64
+ geoHash string
+ geoDistance string
+ unit string
+ params map[string]interface{}
+ valueScript string
+ lang string
+ entries []geoDistanceFacetEntry
+}
+
+func NewGeoDistanceFacet() GeoDistanceFacet {
+ return GeoDistanceFacet{
+ params: make(map[string]interface{}),
+ entries: make([]geoDistanceFacetEntry, 0),
+ }
+}
+
+func (f GeoDistanceFacet) FacetFilter(filter Facet) GeoDistanceFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f GeoDistanceFacet) Global(global bool) GeoDistanceFacet {
+ f.global = &global
+ return f
+}
+
+func (f GeoDistanceFacet) Nested(nested string) GeoDistanceFacet {
+ f.nested = nested
+ return f
+}
+
+func (f GeoDistanceFacet) Mode(mode string) GeoDistanceFacet {
+ f.mode = mode
+ return f
+}
+
+func (f GeoDistanceFacet) Field(fieldName string) GeoDistanceFacet {
+ f.fieldName = fieldName
+ return f
+}
+
+func (f GeoDistanceFacet) ValueField(valueFieldName string) GeoDistanceFacet {
+ f.valueFieldName = valueFieldName
+ return f
+}
+
+func (f GeoDistanceFacet) ValueScript(valueScript string) GeoDistanceFacet {
+ f.valueScript = valueScript
+ return f
+}
+
+func (f GeoDistanceFacet) Lang(lang string) GeoDistanceFacet {
+ f.lang = lang
+ return f
+}
+
+func (f GeoDistanceFacet) ScriptParam(name string, value interface{}) GeoDistanceFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f GeoDistanceFacet) Point(lat, lon float64) GeoDistanceFacet {
+ f.lat = lat
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFacet) Lat(lat float64) GeoDistanceFacet {
+ f.lat = lat
+ return f
+}
+
+func (f GeoDistanceFacet) Lon(lon float64) GeoDistanceFacet {
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFacet) GeoHash(geoHash string) GeoDistanceFacet {
+ f.geoHash = geoHash
+ return f
+}
+
+func (f GeoDistanceFacet) GeoDistance(geoDistance string) GeoDistanceFacet {
+ f.geoDistance = geoDistance
+ return f
+}
+
+func (f GeoDistanceFacet) AddRange(from, to float64) GeoDistanceFacet {
+ f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: to})
+ return f
+}
+
+func (f GeoDistanceFacet) AddUnboundedTo(from float64) GeoDistanceFacet {
+ f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: nil})
+ return f
+}
+
+func (f GeoDistanceFacet) AddUnboundedFrom(to float64) GeoDistanceFacet {
+ f.entries = append(f.entries, geoDistanceFacetEntry{From: nil, To: to})
+ return f
+}
+
+func (f GeoDistanceFacet) Unit(distanceUnit string) GeoDistanceFacet {
+ f.unit = distanceUnit
+ return f
+}
+
+func (f GeoDistanceFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f GeoDistanceFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["geo_distance"] = opts
+
+ if f.geoHash != "" {
+ opts[f.fieldName] = f.geoHash
+ } else {
+ opts[f.fieldName] = []float64{f.lat, f.lon}
+ }
+ if f.valueFieldName != "" {
+ opts["value_field"] = f.valueFieldName
+ }
+ if f.valueScript != "" {
+ opts["value_script"] = f.valueScript
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range f.entries {
+ r := make(map[string]interface{})
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ if f.unit != "" {
+ opts["unit"] = f.unit
+ }
+ if f.geoDistance != "" {
+ opts["distance_type"] = f.geoDistance
+ }
+
+ return source
+}
+
+type geoDistanceFacetEntry struct {
+ From interface{}
+ To interface{}
+}
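+
+// Example usage (a minimal sketch; the field name and coordinates are illustrative):
+//
+//    f := NewGeoDistanceFacet().Field("pin.location").Point(40, -70).
+//        AddUnboundedFrom(10).AddRange(10, 20).AddUnboundedTo(20).Unit("km")
+//    src := f.Source() // the {"geo_distance": ...} fragment with three ranges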
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram.go
new file mode 100644
index 0000000..9fa0695
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram.go
@@ -0,0 +1,110 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Histogram Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/histogram-facet.html
+type HistogramFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField string
+ interval int64
+ timeInterval string
+ comparatorType string
+}
+
+func NewHistogramFacet() HistogramFacet {
+ return HistogramFacet{
+ interval: -1,
+ }
+}
+
+func (f HistogramFacet) FacetFilter(filter Facet) HistogramFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f HistogramFacet) Global(global bool) HistogramFacet {
+ f.global = &global
+ return f
+}
+
+func (f HistogramFacet) Nested(nested string) HistogramFacet {
+ f.nested = nested
+ return f
+}
+
+func (f HistogramFacet) Mode(mode string) HistogramFacet {
+ f.mode = mode
+ return f
+}
+
+func (f HistogramFacet) Field(field string) HistogramFacet {
+ f.keyField = field
+ return f
+}
+
+func (f HistogramFacet) KeyField(keyField string) HistogramFacet {
+ f.keyField = keyField
+ return f
+}
+
+func (f HistogramFacet) ValueField(valueField string) HistogramFacet {
+ f.valueField = valueField
+ return f
+}
+
+func (f HistogramFacet) Interval(interval int64) HistogramFacet {
+ f.interval = interval
+ return f
+}
+
+func (f HistogramFacet) TimeInterval(timeInterval string) HistogramFacet {
+ f.timeInterval = timeInterval
+ return f
+}
+
+func (f HistogramFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f HistogramFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["histogram"] = opts
+
+ if f.valueField != "" {
+ opts["key_field"] = f.keyField
+ opts["value_field"] = f.valueField
+ } else {
+ opts["field"] = f.keyField
+ }
+ if f.timeInterval != "" {
+ opts["time_interval"] = f.timeInterval
+ } else {
+ opts["interval"] = f.interval
+ }
+
+ if f.comparatorType != "" {
+ opts["comparator"] = f.comparatorType
+ }
+
+ return source
+}
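+
+// Example usage (a minimal sketch; the field name is illustrative):
+//
+//    f := NewHistogramFacet().Field("price").Interval(100)
+//    src := f.Source() // {"histogram": {"field": "price", "interval": 100}}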
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script.go
new file mode 100644
index 0000000..fcf815f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_histogram_script.go
@@ -0,0 +1,120 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Histogram Script Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/histogram-facet.html
+type HistogramScriptFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ lang string
+ keyField string
+ keyScript string
+ valueScript string
+ params map[string]interface{}
+ interval int64
+ comparatorType string
+}
+
+func NewHistogramScriptFacet() HistogramScriptFacet {
+ return HistogramScriptFacet{
+ interval: -1,
+ params: make(map[string]interface{}),
+ }
+}
+
+func (f HistogramScriptFacet) FacetFilter(filter Facet) HistogramScriptFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f HistogramScriptFacet) Global(global bool) HistogramScriptFacet {
+ f.global = &global
+ return f
+}
+
+func (f HistogramScriptFacet) Nested(nested string) HistogramScriptFacet {
+ f.nested = nested
+ return f
+}
+
+func (f HistogramScriptFacet) Mode(mode string) HistogramScriptFacet {
+ f.mode = mode
+ return f
+}
+
+func (f HistogramScriptFacet) KeyField(keyField string) HistogramScriptFacet {
+ f.keyField = keyField
+ return f
+}
+
+func (f HistogramScriptFacet) KeyScript(keyScript string) HistogramScriptFacet {
+ f.keyScript = keyScript
+ return f
+}
+
+func (f HistogramScriptFacet) ValueScript(valueScript string) HistogramScriptFacet {
+ f.valueScript = valueScript
+ return f
+}
+
+func (f HistogramScriptFacet) Interval(interval int64) HistogramScriptFacet {
+ f.interval = interval
+ return f
+}
+
+func (f HistogramScriptFacet) Param(name string, value interface{}) HistogramScriptFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f HistogramScriptFacet) Comparator(comparatorType string) HistogramScriptFacet {
+ f.comparatorType = comparatorType
+ return f
+}
+
+func (f HistogramScriptFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f HistogramScriptFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["histogram"] = opts
+
+ if f.keyField != "" {
+ opts["key_field"] = f.keyField
+ } else if f.keyScript != "" {
+ opts["key_script"] = f.keyScript
+ }
+ opts["value_script"] = f.valueScript
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if f.interval > 0 {
+ opts["interval"] = f.interval
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ if f.comparatorType != "" {
+ opts["comparator"] = f.comparatorType
+ }
+ return source
+}
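+
+// Example usage (a minimal sketch; the scripts are illustrative):
+//
+//    f := NewHistogramScriptFacet().
+//        KeyScript("doc['date'].value").
+//        ValueScript("doc['price'].value").
+//        Interval(100)
+//    src := f.Source() // the {"histogram": ...} fragment with key/value scripts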
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query.go
new file mode 100644
index 0000000..184c8b3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_query.go
@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/query-facet.html
+type QueryFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ query Query
+}
+
+func NewQueryFacet() QueryFacet {
+ return QueryFacet{}
+}
+
+func (f QueryFacet) FacetFilter(filter Facet) QueryFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f QueryFacet) Global(global bool) QueryFacet {
+ f.global = &global
+ return f
+}
+
+func (f QueryFacet) Nested(nested string) QueryFacet {
+ f.nested = nested
+ return f
+}
+
+func (f QueryFacet) Mode(mode string) QueryFacet {
+ f.mode = mode
+ return f
+}
+
+func (f QueryFacet) Query(query Query) QueryFacet {
+ f.query = query
+ return f
+}
+
+func (f QueryFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f QueryFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ source["query"] = f.query.Source()
+ return source
+}
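+
+// Example usage (a minimal sketch; NewTermQuery is assumed to be the
+// term query defined elsewhere in this package, and values are illustrative):
+//
+//    f := NewQueryFacet().Query(NewTermQuery("user", "kimchy"))
+//    src := f.Source() // {"query": {"term": {"user": "kimchy"}}}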
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range.go
new file mode 100644
index 0000000..864b355
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_range.go
@@ -0,0 +1,158 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "time"
+)
+
+// Range facet allows you to specify a set of ranges and get both the
+// number of docs (count) that fall within each range,
+// and aggregated data based either on the same field or on another field.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-range-facet.html
+type RangeFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField string
+ entries []rangeFacetEntry
+}
+
+type rangeFacetEntry struct {
+ From interface{}
+ To interface{}
+}
+
+func NewRangeFacet() RangeFacet {
+ return RangeFacet{
+ entries: make([]rangeFacetEntry, 0),
+ }
+}
+
+func (f RangeFacet) FacetFilter(filter Facet) RangeFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f RangeFacet) Global(global bool) RangeFacet {
+ f.global = &global
+ return f
+}
+
+func (f RangeFacet) Nested(nested string) RangeFacet {
+ f.nested = nested
+ return f
+}
+
+func (f RangeFacet) Mode(mode string) RangeFacet {
+ f.mode = mode
+ return f
+}
+
+func (f RangeFacet) Field(field string) RangeFacet {
+ f.keyField = field
+ f.valueField = field
+ return f
+}
+
+func (f RangeFacet) KeyField(keyField string) RangeFacet {
+ f.keyField = keyField
+ return f
+}
+
+func (f RangeFacet) ValueField(valueField string) RangeFacet {
+ f.valueField = valueField
+ return f
+}
+
+func (f RangeFacet) AddRange(from, to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: to})
+ return f
+}
+
+func (f RangeFacet) AddUnboundedTo(from interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: nil})
+ return f
+}
+
+func (f RangeFacet) AddUnboundedFrom(to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: nil, To: to})
+ return f
+}
+
+func (f RangeFacet) Lt(to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: nil, To: to})
+ return f
+}
+
+func (f RangeFacet) Between(from, to interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: to})
+ return f
+}
+
+func (f RangeFacet) Gt(from interface{}) RangeFacet {
+ f.entries = append(f.entries, rangeFacetEntry{From: from, To: nil})
+ return f
+}
+
+func (f RangeFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f RangeFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["range"] = opts
+
+ if f.valueField != "" && f.keyField != f.valueField {
+ opts["key_field"] = f.keyField
+ opts["value_field"] = f.valueField
+ } else {
+ opts["field"] = f.keyField
+ }
+
+ ranges := make([]interface{}, 0)
+ for _, ent := range f.entries {
+ r := make(map[string]interface{})
+ if ent.From != nil {
+ switch from := ent.From.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["from"] = from
+ case time.Time:
+ r["from"] = from.Format(time.RFC3339)
+ case string:
+ r["from"] = from
+ }
+ }
+ if ent.To != nil {
+ switch to := ent.To.(type) {
+ case int, int16, int32, int64, float32, float64:
+ r["to"] = to
+ case time.Time:
+ r["to"] = to.Format(time.RFC3339)
+ case string:
+ r["to"] = to
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ opts["ranges"] = ranges
+
+ return source
+}
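+
+// Example usage (a minimal sketch; the field name is illustrative):
+//
+//    f := NewRangeFacet().Field("price").Lt(10).Between(10, 100).Gt(100)
+//    src := f.Source() // {"range": {"field": "price", "ranges": [...]}}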
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical.go
new file mode 100644
index 0000000..5a813a1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical.go
@@ -0,0 +1,88 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Statistical facet allows you to compute statistical data over numeric fields.
+// The statistical data include count, total, sum of squares, mean (average),
+// minimum, maximum, variance, and standard deviation.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-statistical-facet.html
+type StatisticalFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ fieldName string
+ fieldNames []string
+}
+
+func NewStatisticalFacet() StatisticalFacet {
+ return StatisticalFacet{
+ fieldNames: make([]string, 0),
+ }
+}
+
+func (f StatisticalFacet) FacetFilter(filter Facet) StatisticalFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f StatisticalFacet) Global(global bool) StatisticalFacet {
+ f.global = &global
+ return f
+}
+
+func (f StatisticalFacet) Nested(nested string) StatisticalFacet {
+ f.nested = nested
+ return f
+}
+
+func (f StatisticalFacet) Mode(mode string) StatisticalFacet {
+ f.mode = mode
+ return f
+}
+
+func (f StatisticalFacet) Field(fieldName string) StatisticalFacet {
+ f.fieldName = fieldName
+ return f
+}
+
+func (f StatisticalFacet) Fields(fieldNames ...string) StatisticalFacet {
+ f.fieldNames = append(f.fieldNames, fieldNames...)
+ return f
+}
+
+func (f StatisticalFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f StatisticalFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["statistical"] = opts
+
+ if len(f.fieldNames) > 0 {
+ if len(f.fieldNames) == 1 {
+ opts["field"] = f.fieldNames[0]
+ } else {
+ opts["fields"] = f.fieldNames
+ }
+ } else {
+ opts["field"] = f.fieldName
+ }
+
+ return source
+}
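+
+// Example usage (a minimal sketch; the field name is illustrative):
+//
+//    f := NewStatisticalFacet().Field("price")
+//    src := f.Source() // {"statistical": {"field": "price"}}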
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script.go
new file mode 100644
index 0000000..36a60d5
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_statistical_script.go
@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Statistical facet allows you to compute statistical data over numeric fields.
+// The statistical data include count, total, sum of squares, mean (average),
+// minimum, maximum, variance, and standard deviation.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-statistical-facet.html
+type StatisticalScriptFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ lang string
+ script string
+ params map[string]interface{}
+}
+
+func NewStatisticalScriptFacet() StatisticalScriptFacet {
+ return StatisticalScriptFacet{
+ params: make(map[string]interface{}),
+ }
+}
+
+func (f StatisticalScriptFacet) FacetFilter(filter Facet) StatisticalScriptFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f StatisticalScriptFacet) Global(global bool) StatisticalScriptFacet {
+ f.global = &global
+ return f
+}
+
+func (f StatisticalScriptFacet) Nested(nested string) StatisticalScriptFacet {
+ f.nested = nested
+ return f
+}
+
+func (f StatisticalScriptFacet) Mode(mode string) StatisticalScriptFacet {
+ f.mode = mode
+ return f
+}
+
+func (f StatisticalScriptFacet) Lang(lang string) StatisticalScriptFacet {
+ f.lang = lang
+ return f
+}
+
+func (f StatisticalScriptFacet) Script(script string) StatisticalScriptFacet {
+ f.script = script
+ return f
+}
+
+func (f StatisticalScriptFacet) Param(name string, value interface{}) StatisticalScriptFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f StatisticalScriptFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f StatisticalScriptFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["statistical"] = opts
+
+ opts["script"] = f.script
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+
+ return source
+}
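+
+// Example usage (a minimal sketch; the script and param are illustrative):
+//
+//    f := NewStatisticalScriptFacet().Script("doc['price'].value * factor").
+//        Param("factor", 1.2)
+//    src := f.Source() // {"statistical": {"script": ..., "params": ...}}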
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms.go
new file mode 100644
index 0000000..a013342
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms.go
@@ -0,0 +1,203 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Allows you to specify field facets that return the N most frequent terms.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-terms-facet.html
+type TermsFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+
+ fieldName string
+ fields []string
+ size int
+ shardSize *int
+ allTerms *bool
+ exclude []string
+ regex string
+ regexFlags string
+ comparatorType string
+ script string
+ lang string
+ params map[string]interface{}
+ executionHint string
+ index string
+}
+
+func NewTermsFacet() TermsFacet {
+ f := TermsFacet{
+ size: 10,
+ fields: make([]string, 0),
+ exclude: make([]string, 0),
+ params: make(map[string]interface{}),
+ }
+ return f
+}
+
+func (f TermsFacet) FacetFilter(filter Facet) TermsFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f TermsFacet) Global(global bool) TermsFacet {
+ f.global = &global
+ return f
+}
+
+func (f TermsFacet) Nested(nested string) TermsFacet {
+ f.nested = nested
+ return f
+}
+
+func (f TermsFacet) Mode(mode string) TermsFacet {
+ f.mode = mode
+ return f
+}
+
+func (f TermsFacet) Field(fieldName string) TermsFacet {
+ f.fieldName = fieldName
+ return f
+}
+
+func (f TermsFacet) Fields(fields ...string) TermsFacet {
+ f.fields = append(f.fields, fields...)
+ return f
+}
+
+func (f TermsFacet) ScriptField(scriptField string) TermsFacet {
+ f.script = scriptField
+ return f
+}
+
+func (f TermsFacet) Exclude(exclude ...string) TermsFacet {
+ f.exclude = append(f.exclude, exclude...)
+ return f
+}
+
+func (f TermsFacet) Size(size int) TermsFacet {
+ f.size = size
+ return f
+}
+
+func (f TermsFacet) ShardSize(shardSize int) TermsFacet {
+ f.shardSize = &shardSize
+ return f
+}
+
+func (f TermsFacet) Regex(regex string) TermsFacet {
+ f.regex = regex
+ return f
+}
+
+func (f TermsFacet) RegexFlags(regexFlags string) TermsFacet {
+ f.regexFlags = regexFlags
+ return f
+}
+
+func (f TermsFacet) Order(order string) TermsFacet {
+ f.comparatorType = order
+ return f
+}
+
+func (f TermsFacet) Comparator(comparatorType string) TermsFacet {
+ f.comparatorType = comparatorType
+ return f
+}
+
+func (f TermsFacet) Script(script string) TermsFacet {
+ f.script = script
+ return f
+}
+
+func (f TermsFacet) Lang(lang string) TermsFacet {
+ f.lang = lang
+ return f
+}
+
+func (f TermsFacet) ExecutionHint(hint string) TermsFacet {
+ f.executionHint = hint
+ return f
+}
+
+func (f TermsFacet) Param(name string, value interface{}) TermsFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f TermsFacet) AllTerms(allTerms bool) TermsFacet {
+ f.allTerms = &allTerms
+ return f
+}
+
+func (f TermsFacet) Index(index string) TermsFacet {
+ f.index = index
+ return f
+}
+
+func (f TermsFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f TermsFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["terms"] = opts
+
+ if len(f.fields) > 0 {
+ if len(f.fields) == 1 {
+ opts["field"] = f.fields[0]
+ } else {
+ opts["fields"] = f.fields
+ }
+ } else {
+ opts["field"] = f.fieldName
+ }
+ opts["size"] = f.size
+ if f.shardSize != nil && *f.shardSize > f.size {
+ opts["shard_size"] = *f.shardSize
+ }
+ if len(f.exclude) > 0 {
+ opts["exclude"] = f.exclude
+ }
+ if f.regex != "" {
+ opts["regex"] = f.regex
+ if f.regexFlags != "" {
+ opts["regex_flags"] = f.regexFlags
+ }
+ }
+ if f.comparatorType != "" {
+ opts["order"] = f.comparatorType
+ }
+ if f.allTerms != nil {
+ opts["all_terms"] = *f.allTerms
+ }
+ if f.script != "" {
+ opts["script"] = f.script
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ }
+ if f.executionHint != "" {
+ opts["execution_hint"] = f.executionHint
+ }
+ return source
+}
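+
+// Example usage (a minimal sketch; the field name is illustrative):
+//
+//    f := NewTermsFacet().Field("tags").Size(10).Order("count")
+//    src := f.Source() // the {"terms": ...} fragment shown above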
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats.go
new file mode 100644
index 0000000..8f68f3d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_facets_terms_stats.go
@@ -0,0 +1,142 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The terms_stats facet combines the terms and statistical facets:
+// it computes stats on one field, per term value driven
+// by another field.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-terms-stats-facet.html
+type TermsStatsFacet struct {
+ facetFilter Filter
+ global *bool
+ nested string
+ mode string
+ keyField string
+ valueField string
+ size int
+ shardSize int
+ comparatorType string
+ script string
+ lang string
+ params map[string]interface{}
+}
+
+func NewTermsStatsFacet() TermsStatsFacet {
+ return TermsStatsFacet{
+ size: -1,
+ shardSize: -1,
+ params: make(map[string]interface{}),
+ }
+}
+
+func (f TermsStatsFacet) FacetFilter(filter Facet) TermsStatsFacet {
+ f.facetFilter = filter
+ return f
+}
+
+func (f TermsStatsFacet) Global(global bool) TermsStatsFacet {
+ f.global = &global
+ return f
+}
+
+func (f TermsStatsFacet) Nested(nested string) TermsStatsFacet {
+ f.nested = nested
+ return f
+}
+
+func (f TermsStatsFacet) Mode(mode string) TermsStatsFacet {
+ f.mode = mode
+ return f
+}
+
+func (f TermsStatsFacet) KeyField(keyField string) TermsStatsFacet {
+ f.keyField = keyField
+ return f
+}
+
+func (f TermsStatsFacet) ValueField(valueField string) TermsStatsFacet {
+ f.valueField = valueField
+ return f
+}
+
+func (f TermsStatsFacet) Order(comparatorType string) TermsStatsFacet {
+ f.comparatorType = comparatorType
+ return f
+}
+
+func (f TermsStatsFacet) Size(size int) TermsStatsFacet {
+ f.size = size
+ return f
+}
+
+func (f TermsStatsFacet) ShardSize(shardSize int) TermsStatsFacet {
+ f.shardSize = shardSize
+ return f
+}
+
+func (f TermsStatsFacet) AllTerms() TermsStatsFacet {
+ f.size = 0
+ return f
+}
+
+func (f TermsStatsFacet) ValueScript(script string) TermsStatsFacet {
+ f.script = script
+ return f
+}
+
+func (f TermsStatsFacet) Param(name string, value interface{}) TermsStatsFacet {
+ f.params[name] = value
+ return f
+}
+
+func (f TermsStatsFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+ if f.facetFilter != nil {
+ source["facet_filter"] = f.facetFilter.Source()
+ }
+ if f.nested != "" {
+ source["nested"] = f.nested
+ }
+ if f.global != nil {
+ source["global"] = *f.global
+ }
+ if f.mode != "" {
+ source["mode"] = f.mode
+ }
+}
+
+func (f TermsStatsFacet) Source() interface{} {
+ source := make(map[string]interface{})
+ f.addFilterFacetAndGlobal(source)
+ opts := make(map[string]interface{})
+ source["terms_stats"] = opts
+
+ opts["key_field"] = f.keyField
+ if f.valueField != "" {
+ opts["value_field"] = f.valueField
+ }
+
+ if f.script != "" {
+ opts["value_script"] = f.script
+ if f.lang != "" {
+ opts["lang"] = f.lang
+ }
+ if len(f.params) > 0 {
+ opts["params"] = f.params
+ }
+ }
+
+ if f.comparatorType != "" {
+ opts["order"] = f.comparatorType
+ }
+
+ if f.size != -1 {
+ opts["size"] = f.size
+ }
+ if f.shardSize > f.size {
+ opts["shard_size"] = f.shardSize
+ }
+
+ return source
+}
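+
+// Example usage (a minimal sketch; the field names are illustrative):
+//
+//    f := NewTermsStatsFacet().KeyField("tag").ValueField("price")
+//    src := f.Source() // {"terms_stats": {"key_field": "tag", "value_field": "price"}}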
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and.go
new file mode 100644
index 0000000..60c0117
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_and.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches documents using the AND boolean operator
+// on other filters. Can be placed within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-and-filter.html
+type AndFilter struct {
+ filters []Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewAndFilter(filters ...Filter) AndFilter {
+ f := AndFilter{
+ filters: make([]Filter, 0),
+ }
+ if len(filters) > 0 {
+ f.filters = append(f.filters, filters...)
+ }
+ return f
+}
+
+func (f AndFilter) Add(filter Filter) AndFilter {
+ f.filters = append(f.filters, filter)
+ return f
+}
+
+func (f AndFilter) Cache(cache bool) AndFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f AndFilter) CacheKey(cacheKey string) AndFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f AndFilter) FilterName(filterName string) AndFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f AndFilter) Source() interface{} {
+ // {
+ //   "and" : {
+ //     "filters" : [ ... filters ... ],
+ //     "_cache" : true
+ //   }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["and"] = params
+
+ filters := make([]interface{}, 0)
+ for _, filter := range f.filters {
+ filters = append(filters, filter.Source())
+ }
+ params["filters"] = filters
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
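+
+// Example usage (a minimal sketch; ExistsFilter is defined in
+// search_filters_exists.go, and the field names are illustrative):
+//
+//    f := NewAndFilter(NewExistsFilter("user"), NewExistsFilter("age")).Cache(true)
+//    src := f.Source() // the {"and": {"filters": [...], "_cache": true}} fragment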
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool.go
new file mode 100644
index 0000000..75d1e86
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_bool.go
@@ -0,0 +1,135 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches documents matching boolean combinations
+// of other queries. Similar in concept to the Boolean query,
+// except that the clauses are other filters.
+// Can be placed within queries that accept a filter.
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-bool-filter.html
+type BoolFilter struct {
+ mustClauses []Filter
+ shouldClauses []Filter
+ mustNotClauses []Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+// NewBoolFilter creates a new bool filter.
+func NewBoolFilter() BoolFilter {
+ f := BoolFilter{
+ mustClauses: make([]Filter, 0),
+ shouldClauses: make([]Filter, 0),
+ mustNotClauses: make([]Filter, 0),
+ }
+ return f
+}
+
+func (f BoolFilter) Must(filters ...Filter) BoolFilter {
+ f.mustClauses = append(f.mustClauses, filters...)
+ return f
+}
+
+func (f BoolFilter) MustNot(filters ...Filter) BoolFilter {
+ f.mustNotClauses = append(f.mustNotClauses, filters...)
+ return f
+}
+
+func (f BoolFilter) Should(filters ...Filter) BoolFilter {
+ f.shouldClauses = append(f.shouldClauses, filters...)
+ return f
+}
+
+func (f BoolFilter) FilterName(filterName string) BoolFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f BoolFilter) Cache(cache bool) BoolFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f BoolFilter) CacheKey(cacheKey string) BoolFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+// Source returns the JSON source for the bool filter.
+func (f BoolFilter) Source() interface{} {
+ // {
+ // "bool" : {
+ // "must" : {
+ // "term" : { "user" : "kimchy" }
+ // },
+ // "must_not" : {
+ // "range" : {
+ // "age" : { "from" : 10, "to" : 20 }
+ // }
+ // },
+ // "should" : [
+ // {
+ // "term" : { "tag" : "wow" }
+ // },
+ // {
+ // "term" : { "tag" : "elasticsearch" }
+ // }
+ // ],
+ // "_cache" : true
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ boolClause := make(map[string]interface{})
+ source["bool"] = boolClause
+
+ // must
+ if len(f.mustClauses) == 1 {
+ boolClause["must"] = f.mustClauses[0].Source()
+ } else if len(f.mustClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range f.mustClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must"] = clauses
+ }
+
+ // must_not
+ if len(f.mustNotClauses) == 1 {
+ boolClause["must_not"] = f.mustNotClauses[0].Source()
+ } else if len(f.mustNotClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range f.mustNotClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must_not"] = clauses
+ }
+
+ // should
+ if len(f.shouldClauses) == 1 {
+ boolClause["should"] = f.shouldClauses[0].Source()
+ } else if len(f.shouldClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range f.shouldClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["should"] = clauses
+ }
+
+ if f.filterName != "" {
+ boolClause["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ boolClause["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ boolClause["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
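+
+// Example usage (a minimal sketch; the filters and values are illustrative,
+// using the ExistsFilter and IdsFilter defined elsewhere in this package):
+//
+//    f := NewBoolFilter().
+//        Must(NewExistsFilter("user")).
+//        MustNot(NewIdsFilter("tweet").Ids("1"))
+//    src := f.Source() // the {"bool": ...} fragment shown above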
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists.go
new file mode 100644
index 0000000..7785880
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_exists.go
@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents in which a specific field has a value.
+// For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/exists-filter.html
+type ExistsFilter struct {
+ Filter
+ name string
+ filterName string
+}
+
+func NewExistsFilter(name string) ExistsFilter {
+ f := ExistsFilter{name: name}
+ return f
+}
+
+func (f ExistsFilter) FilterName(filterName string) ExistsFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f ExistsFilter) Source() interface{} {
+ // {
+ // "exists" : {
+ // "field" : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["exists"] = params
+ params["field"] = f.name
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
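+
+// Example usage (a minimal sketch; the field name is illustrative):
+//
+//    f := NewExistsFilter("user")
+//    src := f.Source() // {"exists": {"field": "user"}}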
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance.go
new file mode 100644
index 0000000..17f8812
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_distance.go
@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceFilter filters documents to include only hits that exist
+// within a specific distance from a geo point.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-filter.html
+type GeoDistanceFilter struct {
+ Filter
+ name string
+ distance string
+ lat float64
+ lon float64
+ geohash string
+ distanceType string
+ optimizeBbox string
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+// NewGeoDistanceFilter creates a new GeoDistanceFilter.
+func NewGeoDistanceFilter(name string) GeoDistanceFilter {
+ f := GeoDistanceFilter{name: name}
+ return f
+}
+
+func (f GeoDistanceFilter) Distance(distance string) GeoDistanceFilter {
+ f.distance = distance
+ return f
+}
+
+func (f GeoDistanceFilter) GeoPoint(point *GeoPoint) GeoDistanceFilter {
+ f.lat = point.Lat
+ f.lon = point.Lon
+ return f
+}
+
+func (f GeoDistanceFilter) Point(lat, lon float64) GeoDistanceFilter {
+ f.lat = lat
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFilter) Lat(lat float64) GeoDistanceFilter {
+ f.lat = lat
+ return f
+}
+
+func (f GeoDistanceFilter) Lon(lon float64) GeoDistanceFilter {
+ f.lon = lon
+ return f
+}
+
+func (f GeoDistanceFilter) GeoHash(geohash string) GeoDistanceFilter {
+ f.geohash = geohash
+ return f
+}
+
+func (f GeoDistanceFilter) DistanceType(distanceType string) GeoDistanceFilter {
+ f.distanceType = distanceType
+ return f
+}
+
+func (f GeoDistanceFilter) OptimizeBbox(optimizeBbox string) GeoDistanceFilter {
+ f.optimizeBbox = optimizeBbox
+ return f
+}
+
+func (f GeoDistanceFilter) Cache(cache bool) GeoDistanceFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f GeoDistanceFilter) CacheKey(cacheKey string) GeoDistanceFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f GeoDistanceFilter) FilterName(filterName string) GeoDistanceFilter {
+ f.filterName = filterName
+ return f
+}
+
+// Source returns the JSON source for the geo_distance filter.
+func (f GeoDistanceFilter) Source() interface{} {
+ // {
+ // "geo_distance" : {
+ // "distance" : "200km",
+ // "pin.location" : {
+ // "lat" : 40,
+ // "lon" : -70
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+
+ if f.geohash != "" {
+ params[f.name] = f.geohash
+ } else {
+ location := make(map[string]interface{})
+ location["lat"] = f.lat
+ location["lon"] = f.lon
+ params[f.name] = location
+ }
+
+ if f.distance != "" {
+ params["distance"] = f.distance
+ }
+ if f.distanceType != "" {
+ params["distance_type"] = f.distanceType
+ }
+ if f.optimizeBbox != "" {
+ params["optimize_bbox"] = f.optimizeBbox
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ source["geo_distance"] = params
+
+ return source
+}
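+
+// Example usage (a minimal sketch; the field name and coordinates are illustrative):
+//
+//    f := NewGeoDistanceFilter("pin.location").Distance("200km").Point(40, -70)
+//    src := f.Source() // the {"geo_distance": ...} fragment shown above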
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon.go
new file mode 100644
index 0000000..7032bcc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_geo_polygon.go
@@ -0,0 +1,81 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that includes only hits falling within a polygon of points.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-polygon-filter.html
+type GeoPolygonFilter struct {
+ Filter
+ name string
+ points []*GeoPoint
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewGeoPolygonFilter(name string) GeoPolygonFilter {
+ f := GeoPolygonFilter{name: name, points: make([]*GeoPoint, 0)}
+ return f
+}
+
+func (f GeoPolygonFilter) Cache(cache bool) GeoPolygonFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f GeoPolygonFilter) CacheKey(cacheKey string) GeoPolygonFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f GeoPolygonFilter) FilterName(filterName string) GeoPolygonFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f GeoPolygonFilter) AddPoint(point *GeoPoint) GeoPolygonFilter {
+ f.points = append(f.points, point)
+ return f
+}
+
+func (f GeoPolygonFilter) Source() interface{} {
+ // "geo_polygon" : {
+ // "person.location" : {
+ // "points" : [
+ // {"lat" : 40, "lon" : -70},
+ // {"lat" : 30, "lon" : -80},
+ // {"lat" : 20, "lon" : -90}
+ // ]
+ // }
+ // }
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["geo_polygon"] = params
+
+ polygon := make(map[string]interface{})
+ params[f.name] = polygon
+
+ points := make([]interface{}, 0)
+ for _, point := range f.points {
+ points = append(points, point.Source())
+ }
+ polygon["points"] = points
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
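+
+// Example usage (a minimal sketch; the field name and coordinates are
+// illustrative, and GeoPoint is defined elsewhere in this package):
+//
+//    f := NewGeoPolygonFilter("person.location").
+//        AddPoint(&GeoPoint{Lat: 40, Lon: -70}).
+//        AddPoint(&GeoPoint{Lat: 30, Lon: -80}).
+//        AddPoint(&GeoPoint{Lat: 20, Lon: -90})
+//    src := f.Source() // the {"geo_polygon": ...} fragment shown above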
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child.go
new file mode 100644
index 0000000..6d291d1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_child.go
@@ -0,0 +1,125 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_child filter accepts a query or filter and a child type.
+// It returns parent documents that have child documents of the given
+// type matching the query or filter.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html
+type HasChildFilter struct {
+ filter Filter
+ query Query
+ childType string
+ filterName string
+ cache *bool
+ cacheKey string
+ shortCircuitCutoff *int
+ minChildren *int
+ maxChildren *int
+ innerHit *InnerHit
+}
+
+// NewHasChildFilter creates a new has_child filter.
+func NewHasChildFilter(childType string) HasChildFilter {
+ f := HasChildFilter{
+ childType: childType,
+ }
+ return f
+}
+
+func (f HasChildFilter) Query(query Query) HasChildFilter {
+ f.query = query
+ return f
+}
+
+func (f HasChildFilter) Filter(filter Filter) HasChildFilter {
+ f.filter = filter
+ return f
+}
+
+func (f HasChildFilter) FilterName(filterName string) HasChildFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f HasChildFilter) Cache(cache bool) HasChildFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f HasChildFilter) CacheKey(cacheKey string) HasChildFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f HasChildFilter) ShortCircuitCutoff(shortCircuitCutoff int) HasChildFilter {
+ f.shortCircuitCutoff = &shortCircuitCutoff
+ return f
+}
+
+func (f HasChildFilter) MinChildren(minChildren int) HasChildFilter {
+ f.minChildren = &minChildren
+ return f
+}
+
+func (f HasChildFilter) MaxChildren(maxChildren int) HasChildFilter {
+ f.maxChildren = &maxChildren
+ return f
+}
+
+func (f HasChildFilter) InnerHit(innerHit *InnerHit) HasChildFilter {
+ f.innerHit = innerHit
+ return f
+}
+
+// Source returns the JSON document for the filter.
+func (f HasChildFilter) Source() interface{} {
+ // {
+ // "has_child" : {
+ // "type" : "blog_tag",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ filter := make(map[string]interface{})
+ source["has_child"] = filter
+
+ if f.query != nil {
+ filter["query"] = f.query.Source()
+ } else if f.filter != nil {
+ filter["filter"] = f.filter.Source()
+ }
+
+ filter["type"] = f.childType
+ if f.filterName != "" {
+ filter["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ filter["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ filter["_cache_key"] = f.cacheKey
+ }
+ if f.shortCircuitCutoff != nil {
+ filter["short_circuit_cutoff"] = *f.shortCircuitCutoff
+ }
+ if f.minChildren != nil {
+ filter["min_children"] = *f.minChildren
+ }
+ if f.maxChildren != nil {
+ filter["max_children"] = *f.maxChildren
+ }
+ if f.innerHit != nil {
+ filter["inner_hits"] = f.innerHit.Source()
+ }
+ return source
+}
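+
+// Example usage (a minimal sketch; the type and field names are illustrative):
+//
+//    f := NewHasChildFilter("blog_tag").Filter(NewExistsFilter("tag"))
+//    src := f.Source() // the {"has_child": ...} fragment shown above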
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent.go
new file mode 100644
index 0000000..bfbbaa3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_has_parent.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_parent filter accepts a query and a parent type.
+// The query is executed in the parent document space,
+// which is specified by the parent type.
+// This filter returns child documents whose associated parents have matched.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-parent-filter.html
+type HasParentFilter struct {
+ filter Filter
+ query Query
+ parentType string
+ filterName string
+ cache *bool
+ cacheKey string
+ innerHit *InnerHit
+}
+
+// NewHasParentFilter creates a new has_parent filter.
+func NewHasParentFilter(parentType string) HasParentFilter {
+ f := HasParentFilter{
+ parentType: parentType,
+ }
+ return f
+}
+
+func (f HasParentFilter) Query(query Query) HasParentFilter {
+ f.query = query
+ return f
+}
+
+func (f HasParentFilter) Filter(filter Filter) HasParentFilter {
+ f.filter = filter
+ return f
+}
+
+func (f HasParentFilter) FilterName(filterName string) HasParentFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f HasParentFilter) Cache(cache bool) HasParentFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f HasParentFilter) CacheKey(cacheKey string) HasParentFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f HasParentFilter) InnerHit(innerHit *InnerHit) HasParentFilter {
+ f.innerHit = innerHit
+ return f
+}
+
+// Source returns the JSON document for the filter.
+func (f HasParentFilter) Source() interface{} {
+ // {
+ // "has_parent" : {
+ // "parent_type" : "blog",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ filter := make(map[string]interface{})
+ source["has_parent"] = filter
+
+ if f.query != nil {
+ filter["query"] = f.query.Source()
+ } else if f.filter != nil {
+ filter["filter"] = f.filter.Source()
+ }
+
+ filter["parent_type"] = f.parentType
+ if f.filterName != "" {
+ filter["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ filter["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ filter["_cache_key"] = f.cacheKey
+ }
+ if f.innerHit != nil {
+ filter["inner_hits"] = f.innerHit.Source()
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids.go
new file mode 100644
index 0000000..2a612c9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_ids.go
@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that only have the provided ids.
+// Note, this filter does not require the _id field to be indexed
+// since it works using the _uid field.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-ids-filter.html
+type IdsFilter struct {
+ Filter
+ types []string
+ values []string
+ filterName string
+}
+
+func NewIdsFilter(types ...string) IdsFilter {
+ return IdsFilter{
+ types: types,
+ values: make([]string, 0),
+ }
+}
+
+func (f IdsFilter) Ids(ids ...string) IdsFilter {
+ f.values = append(f.values, ids...)
+ return f
+}
+
+func (f IdsFilter) FilterName(filterName string) IdsFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f IdsFilter) Source() interface{} {
+ // {
+ // "ids" : {
+ // "type" : "my_type",
+ // "values" : ["1", "4", "100"]
+ // }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["ids"] = params
+
+ // type(s)
+ if len(f.types) == 1 {
+ params["type"] = f.types[0]
+ } else if len(f.types) > 1 {
+ params["types"] = f.types
+ }
+
+ // values
+ params["values"] = f.values
+
+ // filter name
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ return source
+}
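
The type/types switch in Source() above is easiest to see from a short sketch (type names and ids are made up): a single type serializes as "type", several as "types".

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	one, _ := json.Marshal(elastic.NewIdsFilter("my_type").Ids("1", "4", "100").Source())
	many, _ := json.Marshal(elastic.NewIdsFilter("post", "comment").Ids("7").Source())
	fmt.Println(string(one))  // {"ids":{"type":"my_type","values":["1","4","100"]}}
	fmt.Println(string(many)) // {"ids":{"types":["post","comment"],"values":["7"]}}
}
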
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit.go
new file mode 100644
index 0000000..14f0d9d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_limit.go
@@ -0,0 +1,31 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A limit filter limits the number of documents (per shard) that the query is executed on.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-limit-filter.html
+type LimitFilter struct {
+ Filter
+ limit int
+}
+
+func NewLimitFilter(limit int) LimitFilter {
+ f := LimitFilter{limit: limit}
+ return f
+}
+
+func (f LimitFilter) Source() interface{} {
+ // {
+ // "limit" : {
+ // "value" : "..."
+ // }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["limit"] = params
+ params["value"] = f.limit
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all.go
new file mode 100644
index 0000000..5092e6d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_match_all.go
@@ -0,0 +1,25 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches on all documents.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-match-all-filter.html
+type MatchAllFilter struct {
+ Filter
+}
+
+func NewMatchAllFilter() MatchAllFilter {
+ return MatchAllFilter{}
+}
+
+func (f MatchAllFilter) Source() interface{} {
+ // {
+ // "match_all" : {}
+ // }
+ source := make(map[string]interface{})
+ source["match_all"] = make(map[string]interface{})
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing.go
new file mode 100644
index 0000000..d734281
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_missing.go
@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents where a specific field has no value.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-missing-filter.html
+type MissingFilter struct {
+ Filter
+ name string
+ filterName string
+ nullValue *bool
+ existence *bool
+}
+
+func NewMissingFilter(name string) MissingFilter {
+ f := MissingFilter{name: name}
+ return f
+}
+
+func (f MissingFilter) FilterName(filterName string) MissingFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f MissingFilter) NullValue(nullValue bool) MissingFilter {
+ f.nullValue = &nullValue
+ return f
+}
+
+func (f MissingFilter) Existence(existence bool) MissingFilter {
+ f.existence = &existence
+ return f
+}
+
+func (f MissingFilter) Source() interface{} {
+ // {
+ // "missing" : {
+ // "field" : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["missing"] = params
+ params["field"] = f.name
+ if f.nullValue != nil {
+ params["null_value"] = *f.nullValue
+ }
+ if f.existence != nil {
+ params["existence"] = *f.existence
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
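
Because nullValue and existence are pointer fields, they only appear in the output once explicitly set. A sketch with a hypothetical "user" field:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	// Also treat explicit null values as missing, and require that the
	// field exists in the mapping.
	f := elastic.NewMissingFilter("user").NullValue(true).Existence(true)
	body, _ := json.Marshal(f.Source())
	fmt.Println(string(body))
	// {"missing":{"existence":true,"field":"user","null_value":true}}
}
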
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested.go
new file mode 100644
index 0000000..222f43d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_nested.go
@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A nested filter works in a similar fashion to the nested query,
+// except that it is used as a filter. It follows exactly the same
+// structure, but also allows caching the results (set _cache to true)
+// and naming the filter (set the _name value).
+//
+// For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/nested-filter/
+type NestedFilter struct {
+ query Query
+ filter Filter
+ path string
+ join *bool
+ cache *bool
+ cacheKey string
+ filterName string
+ innerHit *InnerHit
+}
+
+func NewNestedFilter(path string) NestedFilter {
+ return NestedFilter{path: path}
+}
+
+func (f NestedFilter) Query(query Query) NestedFilter {
+ f.query = query
+ return f
+}
+
+func (f NestedFilter) Filter(filter Filter) NestedFilter {
+ f.filter = filter
+ return f
+}
+
+func (f NestedFilter) Path(path string) NestedFilter {
+ f.path = path
+ return f
+}
+
+func (f NestedFilter) Join(join bool) NestedFilter {
+ f.join = &join
+ return f
+}
+
+func (f NestedFilter) Cache(cache bool) NestedFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f NestedFilter) CacheKey(cacheKey string) NestedFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f NestedFilter) FilterName(filterName string) NestedFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f NestedFilter) InnerHit(innerHit *InnerHit) NestedFilter {
+ f.innerHit = innerHit
+ return f
+}
+
+func (f NestedFilter) Source() interface{} {
+ // {
+ // "filtered" : {
+ // "query" : { "match_all" : {} },
+ // "filter" : {
+ // "nested" : {
+ // "path" : "obj1",
+ // "query" : {
+ // "bool" : {
+ // "must" : [
+ // {
+ // "match" : {"obj1.name" : "blue"}
+ // },
+ // {
+ // "range" : {"obj1.count" : {"gt" : 5}}
+ // }
+ // ]
+ // }
+ // },
+ // "_cache" : true
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["nested"] = params
+
+ if f.query != nil {
+ params["query"] = f.query.Source()
+ }
+ if f.filter != nil {
+ params["filter"] = f.filter.Source()
+ }
+ if f.join != nil {
+ params["join"] = *f.join
+ }
+ params["path"] = f.path
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.innerHit != nil {
+ params["inner_hits"] = f.innerHit.Source()
+ }
+
+ return source
+}
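
A sketch of the nested filter over a hypothetical "obj1" nested path, using a term filter as the inner condition and enabling the result cache:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	f := elastic.NewNestedFilter("obj1").
		Filter(elastic.NewTermFilter("obj1.name", "blue")).
		Cache(true)
	body, _ := json.Marshal(f.Source())
	fmt.Println(string(body))
	// {"nested":{"_cache":true,"filter":{"term":{"obj1.name":"blue"}},"path":"obj1"}}
}
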
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not.go
new file mode 100644
index 0000000..3dc0c2d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_not.go
@@ -0,0 +1,62 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that filters out documents matched by the wrapped filter.
+// Can be placed within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-not-filter.html#query-dsl-not-filter.
+type NotFilter struct {
+ filter Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewNotFilter(filter Filter) NotFilter {
+ return NotFilter{
+ filter: filter,
+ }
+}
+
+func (f NotFilter) Cache(cache bool) NotFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f NotFilter) CacheKey(cacheKey string) NotFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f NotFilter) FilterName(filterName string) NotFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f NotFilter) Source() interface{} {
+ // {
+ // "not" : {
+ // "filter" : { ... }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["not"] = params
+ params["filter"] = f.filter.Source()
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or.go
new file mode 100644
index 0000000..31b2c67
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_or.go
@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter that matches documents using the OR boolean operator
+// on other filters. Can be placed within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-or-filter.html
+type OrFilter struct {
+ filters []Filter
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewOrFilter(filters ...Filter) OrFilter {
+ f := OrFilter{
+ filters: make([]Filter, 0),
+ }
+ if len(filters) > 0 {
+ f.filters = append(f.filters, filters...)
+ }
+ return f
+}
+
+func (f OrFilter) Add(filter Filter) OrFilter {
+ f.filters = append(f.filters, filter)
+ return f
+}
+
+func (f OrFilter) Cache(cache bool) OrFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f OrFilter) CacheKey(cacheKey string) OrFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f OrFilter) FilterName(filterName string) OrFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f OrFilter) Source() interface{} {
+ // {
+ // "or" : [
+ // ... filters ...
+ // ]
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["or"] = params
+
+ filters := make([]interface{}, len(f.filters))
+ params["filters"] = filters
+ for i, filter := range f.filters {
+ filters[i] = filter.Source()
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ return source
+}
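
NotFilter and OrFilter compose like any other Filter, so negated disjunctions fall out naturally. A sketch excluding two hypothetical document states:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	// Exclude documents whose state is either "draft" or "archived".
	or := elastic.NewOrFilter(
		elastic.NewTermFilter("state", "draft"),
		elastic.NewTermFilter("state", "archived"),
	)
	not := elastic.NewNotFilter(or).Cache(true)
	body, _ := json.Marshal(not.Source())
	fmt.Println(string(body))
	// {"not":{"_cache":true,"filter":{"or":{"filters":[...]}}}} with both term filters in the array
}
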
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix.go
new file mode 100644
index 0000000..a2f5273
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_prefix.go
@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that have fields containing terms
+// with a specified prefix (not analyzed).
+// For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/prefix-filter.html
+type PrefixFilter struct {
+ Filter
+ name string
+ prefix string
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewPrefixFilter(name string, prefix string) PrefixFilter {
+ f := PrefixFilter{name: name, prefix: prefix}
+ return f
+}
+
+func (f PrefixFilter) Cache(cache bool) PrefixFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f PrefixFilter) CacheKey(cacheKey string) PrefixFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f PrefixFilter) FilterName(filterName string) PrefixFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f PrefixFilter) Source() interface{} {
+ // {
+ // "prefix" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["prefix"] = params
+
+ params[f.name] = f.prefix
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query.go
new file mode 100644
index 0000000..2fc7c4c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_query.go
@@ -0,0 +1,68 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// QueryFilter wraps any query to be used as a filter. It can be placed
+// within queries that accept a filter.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-query-filter.html
+type QueryFilter struct {
+ Filter
+ name string
+ query Query
+ cache *bool
+ filterName string
+}
+
+func NewQueryFilter(query Query) QueryFilter {
+ f := QueryFilter{query: query}
+ return f
+}
+
+func (f QueryFilter) Name(name string) QueryFilter {
+ f.name = name
+ return f
+}
+
+func (f QueryFilter) Query(query Query) QueryFilter {
+ f.query = query
+ return f
+}
+
+func (f QueryFilter) Cache(cache bool) QueryFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f QueryFilter) FilterName(filterName string) QueryFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f QueryFilter) Source() interface{} {
+ // {
+ // "query" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ if f.filterName == "" && (f.cache == nil || *f.cache == false) {
+ source["query"] = f.query.Source()
+ } else {
+ params := make(map[string]interface{})
+ source["fquery"] = params
+ params["query"] = f.query.Source()
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ }
+
+ return source
+}
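
The branch in Source() above means the same wrapper serializes two different ways: a bare query when no name or cache flag is set, and an fquery object otherwise. A sketch (NewMatchAllQuery is the package's match_all query constructor, defined elsewhere in this vendored tree):

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	plain, _ := json.Marshal(elastic.NewQueryFilter(elastic.NewMatchAllQuery()).Source())
	named, _ := json.Marshal(elastic.NewQueryFilter(elastic.NewMatchAllQuery()).
		FilterName("all").Cache(true).Source())
	fmt.Println(string(plain)) // {"query":{"match_all":{}}}
	fmt.Println(string(named)) // {"fquery":{"_cache":true,"_name":"all","query":{"match_all":{}}}}
}
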
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range.go
new file mode 100644
index 0000000..4fc1349
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_range.go
@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents with fields that have terms within
+// a certain range. For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html
+type RangeFilter struct {
+ Filter
+ name string
+ from *interface{}
+ to *interface{}
+ timeZone string
+ format string
+ includeLower bool
+ includeUpper bool
+ cache *bool
+ cacheKey string
+ filterName string
+ execution string
+}
+
+func NewRangeFilter(name string) RangeFilter {
+ f := RangeFilter{name: name, includeLower: true, includeUpper: true}
+ return f
+}
+
+// TimeZone allows for adjusting the from/to fields using a time zone.
+// Only valid for date fields.
+func (f RangeFilter) TimeZone(timeZone string) RangeFilter {
+ f.timeZone = timeZone
+ return f
+}
+
+// Format is a valid option for date fields in a Range filter.
+func (f RangeFilter) Format(format string) RangeFilter {
+ f.format = format
+ return f
+}
+
+func (f RangeFilter) From(from interface{}) RangeFilter {
+ f.from = &from
+ return f
+}
+
+func (f RangeFilter) Gt(from interface{}) RangeFilter {
+ f.from = &from
+ f.includeLower = false
+ return f
+}
+
+func (f RangeFilter) Gte(from interface{}) RangeFilter {
+ f.from = &from
+ f.includeLower = true
+ return f
+}
+
+func (f RangeFilter) To(to interface{}) RangeFilter {
+ f.to = &to
+ return f
+}
+
+func (f RangeFilter) Lt(to interface{}) RangeFilter {
+ f.to = &to
+ f.includeUpper = false
+ return f
+}
+
+func (f RangeFilter) Lte(to interface{}) RangeFilter {
+ f.to = &to
+ f.includeUpper = true
+ return f
+}
+
+func (f RangeFilter) IncludeLower(includeLower bool) RangeFilter {
+ f.includeLower = includeLower
+ return f
+}
+
+func (f RangeFilter) IncludeUpper(includeUpper bool) RangeFilter {
+ f.includeUpper = includeUpper
+ return f
+}
+
+func (f RangeFilter) Cache(cache bool) RangeFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f RangeFilter) CacheKey(cacheKey string) RangeFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f RangeFilter) FilterName(filterName string) RangeFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f RangeFilter) Execution(execution string) RangeFilter {
+ f.execution = execution
+ return f
+}
+
+func (f RangeFilter) Source() interface{} {
+ // {
+ // "range" : {
+ // "name" : {
+ // "..." : "..."
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ rangeQ := make(map[string]interface{})
+ source["range"] = rangeQ
+
+ params := make(map[string]interface{})
+ rangeQ[f.name] = params
+
+ params["from"] = f.from
+ params["to"] = f.to
+ if f.timeZone != "" {
+ params["time_zone"] = f.timeZone
+ }
+ if f.format != "" {
+ params["format"] = f.format
+ }
+ params["include_lower"] = f.includeLower
+ params["include_upper"] = f.includeUpper
+
+ if f.filterName != "" {
+ rangeQ["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ rangeQ["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ rangeQ["_cache_key"] = f.cacheKey
+ }
+
+ if f.execution != "" {
+ rangeQ["execution"] = f.execution
+ }
+
+ return source
+}
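
Gt/Gte/Lt/Lte are just From/To plus the matching include flag, as a sketch over a hypothetical numeric "age" field shows:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	// 10 <= age < 20, with the shard-level filter cache enabled.
	f := elastic.NewRangeFilter("age").Gte(10).Lt(20).Cache(true)
	body, _ := json.Marshal(f.Source())
	fmt.Println(string(body))
	// {"range":{"_cache":true,"age":{"from":10,"include_lower":true,"include_upper":false,"to":20}}}
}
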
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp.go
new file mode 100644
index 0000000..107a1e9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_regexp.go
@@ -0,0 +1,90 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// RegexpFilter allows filtering for regular expressions.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-filter.html
+// and http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html#regexp-syntax
+// for details.
+type RegexpFilter struct {
+ Filter
+ name string
+ regexp string
+ flags *string
+ maxDeterminizedStates *int
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+// NewRegexpFilter sets up a new RegexpFilter.
+func NewRegexpFilter(name, regexp string) RegexpFilter {
+ return RegexpFilter{name: name, regexp: regexp}
+}
+
+// Flags sets the regexp flags.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html#_optional_operators
+// for details.
+func (f RegexpFilter) Flags(flags string) RegexpFilter {
+ f.flags = &flags
+ return f
+}
+
+func (f RegexpFilter) MaxDeterminizedStates(maxDeterminizedStates int) RegexpFilter {
+ f.maxDeterminizedStates = &maxDeterminizedStates
+ return f
+}
+
+func (f RegexpFilter) Cache(cache bool) RegexpFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f RegexpFilter) CacheKey(cacheKey string) RegexpFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f RegexpFilter) FilterName(filterName string) RegexpFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f RegexpFilter) Source() interface{} {
+ // {
+ // "regexp" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["regexp"] = params
+
+ if f.flags == nil {
+ params[f.name] = f.regexp
+ } else {
+ x := make(map[string]interface{})
+ x["value"] = f.regexp
+ x["flags"] = *f.flags
+ if f.maxDeterminizedStates != nil {
+ x["max_determinized_states"] = *f.maxDeterminizedStates
+ }
+ params[f.name] = x
+ }
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
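
Setting Flags switches the serialization from the shorthand string form to the object form, which is also where max_determinized_states lands. A sketch on a hypothetical "name" field:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	short, _ := json.Marshal(elastic.NewRegexpFilter("name", "s.*y").Source())
	long, _ := json.Marshal(elastic.NewRegexpFilter("name", "s.*y").
		Flags("INTERSECTION|COMPLEMENT").MaxDeterminizedStates(20000).Source())
	fmt.Println(string(short)) // {"regexp":{"name":"s.*y"}}
	fmt.Println(string(long))  // {"regexp":{"name":{"flags":"INTERSECTION|COMPLEMENT","max_determinized_states":20000,"value":"s.*y"}}}
}
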
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term.go
new file mode 100644
index 0000000..db22f7a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_term.go
@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that have fields that contain
+// a term (not analyzed). For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/term-filter.html
+type TermFilter struct {
+ Filter
+ name string
+ value interface{}
+ cache *bool
+ cacheKey string
+ filterName string
+}
+
+func NewTermFilter(name string, value interface{}) TermFilter {
+ f := TermFilter{name: name, value: value}
+ return f
+}
+
+func (f TermFilter) Cache(cache bool) TermFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f TermFilter) CacheKey(cacheKey string) TermFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f TermFilter) FilterName(filterName string) TermFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f TermFilter) Source() interface{} {
+ // {
+ // "term" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["term"] = params
+
+ params[f.name] = f.value
+
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms.go
new file mode 100644
index 0000000..1705c43
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_terms.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents that have fields that match
+// any of the provided terms (not analyzed). For details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/terms-filter/
+type TermsFilter struct {
+ Filter
+ name string
+ values []interface{}
+ cache *bool
+ cacheKey string
+ filterName string
+ execution string
+}
+
+func NewTermsFilter(name string, values ...interface{}) TermsFilter {
+ f := TermsFilter{
+ name: name,
+ values: make([]interface{}, 0),
+ }
+ f.values = append(f.values, values...)
+ return f
+}
+
+func (f TermsFilter) Cache(cache bool) TermsFilter {
+ f.cache = &cache
+ return f
+}
+
+func (f TermsFilter) CacheKey(cacheKey string) TermsFilter {
+ f.cacheKey = cacheKey
+ return f
+}
+
+func (f TermsFilter) FilterName(filterName string) TermsFilter {
+ f.filterName = filterName
+ return f
+}
+
+func (f TermsFilter) Execution(execution string) TermsFilter {
+ f.execution = execution
+ return f
+}
+
+func (f TermsFilter) Source() interface{} {
+ // {
+ // "terms" : {
+ // "..." : "..."
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["terms"] = params
+ params[f.name] = f.values
+ if f.filterName != "" {
+ params["_name"] = f.filterName
+ }
+ if f.execution != "" {
+ params["execution"] = f.execution
+ }
+ if f.cache != nil {
+ params["_cache"] = *f.cache
+ }
+ if f.cacheKey != "" {
+ params["_cache_key"] = f.cacheKey
+ }
+
+ return source
+}
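
A sketch with hypothetical user names, forcing bool execution and a custom cache key:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	f := elastic.NewTermsFilter("user", "kimchy", "elasticsearch").
		Execution("bool").
		CacheKey("users")
	body, _ := json.Marshal(f.Source())
	fmt.Println(string(body))
	// {"terms":{"_cache_key":"users","execution":"bool","user":["kimchy","elasticsearch"]}}
}
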
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type.go
new file mode 100644
index 0000000..f64a244
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_filters_type.go
@@ -0,0 +1,33 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents matching the provided document / mapping type.
+// Note, this filter can work even when the _type field is not indexed
+// (using the _uid field).
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-type-filter.html
+type TypeFilter struct {
+ Filter
+ typ string
+}
+
+func NewTypeFilter(typ string) TypeFilter {
+ f := TypeFilter{typ: typ}
+ return f
+}
+
+func (f TypeFilter) Source() interface{} {
+ // {
+ // "type" : {
+ // "value" : "..."
+ // }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["type"] = params
+ params["value"] = f.typ
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool.go
new file mode 100644
index 0000000..9fc053c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_bool.go
@@ -0,0 +1,153 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A bool query matches documents matching boolean
+// combinations of other queries.
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html
+type BoolQuery struct {
+ Query
+ mustClauses []Query
+ shouldClauses []Query
+ mustNotClauses []Query
+ boost *float32
+ disableCoord *bool
+ minimumShouldMatch string
+ adjustPureNegative *bool
+ queryName string
+}
+
+// Creates a new bool query.
+func NewBoolQuery() BoolQuery {
+ q := BoolQuery{
+ mustClauses: make([]Query, 0),
+ shouldClauses: make([]Query, 0),
+ mustNotClauses: make([]Query, 0),
+ }
+ return q
+}
+
+func (q BoolQuery) Must(queries ...Query) BoolQuery {
+ q.mustClauses = append(q.mustClauses, queries...)
+ return q
+}
+
+func (q BoolQuery) MustNot(queries ...Query) BoolQuery {
+ q.mustNotClauses = append(q.mustNotClauses, queries...)
+ return q
+}
+
+func (q BoolQuery) Should(queries ...Query) BoolQuery {
+ q.shouldClauses = append(q.shouldClauses, queries...)
+ return q
+}
+
+func (q BoolQuery) Boost(boost float32) BoolQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q BoolQuery) DisableCoord(disableCoord bool) BoolQuery {
+ q.disableCoord = &disableCoord
+ return q
+}
+
+func (q BoolQuery) MinimumShouldMatch(minimumShouldMatch string) BoolQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q BoolQuery) AdjustPureNegative(adjustPureNegative bool) BoolQuery {
+ q.adjustPureNegative = &adjustPureNegative
+ return q
+}
+
+func (q BoolQuery) QueryName(queryName string) BoolQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the bool query.
+func (q BoolQuery) Source() interface{} {
+ // {
+ // "bool" : {
+ // "must" : {
+ // "term" : { "user" : "kimchy" }
+ // },
+ // "must_not" : {
+ // "range" : {
+ // "age" : { "from" : 10, "to" : 20 }
+ // }
+ // },
+ // "should" : [
+ // {
+ // "term" : { "tag" : "wow" }
+ // },
+ // {
+ // "term" : { "tag" : "elasticsearch" }
+ // }
+ // ],
+ // "minimum_number_should_match" : 1,
+ // "boost" : 1.0
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ boolClause := make(map[string]interface{})
+ query["bool"] = boolClause
+
+ // must
+ if len(q.mustClauses) == 1 {
+ boolClause["must"] = q.mustClauses[0].Source()
+ } else if len(q.mustClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.mustClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must"] = clauses
+ }
+
+ // must_not
+ if len(q.mustNotClauses) == 1 {
+ boolClause["must_not"] = q.mustNotClauses[0].Source()
+ } else if len(q.mustNotClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.mustNotClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["must_not"] = clauses
+ }
+
+ // should
+ if len(q.shouldClauses) == 1 {
+ boolClause["should"] = q.shouldClauses[0].Source()
+ } else if len(q.shouldClauses) > 1 {
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.shouldClauses {
+ clauses = append(clauses, subQuery.Source())
+ }
+ boolClause["should"] = clauses
+ }
+
+ if q.boost != nil {
+ boolClause["boost"] = *q.boost
+ }
+ if q.disableCoord != nil {
+ boolClause["disable_coord"] = *q.disableCoord
+ }
+ if q.minimumShouldMatch != "" {
+ boolClause["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.adjustPureNegative != nil {
+ boolClause["adjust_pure_negative"] = *q.adjustPureNegative
+ }
+ if q.queryName != "" {
+ boolClause["_name"] = q.queryName
+ }
+
+ return query
+}
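
A sketch of the bool query mirroring the JSON comment above (NewTermQuery is the package's term query constructor, defined elsewhere in this vendored tree):

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewBoolQuery().
		Must(elastic.NewTermQuery("user", "kimchy")).
		Should(
			elastic.NewTermQuery("tag", "wow"),
			elastic.NewTermQuery("tag", "elasticsearch"),
		).
		MinimumShouldMatch("1").
		Boost(1.0)
	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// Per Source() above, a single clause serializes as an object and
	// multiple clauses as an array.
}
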
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting.go
new file mode 100644
index 0000000..29b7a62
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_boosting.go
@@ -0,0 +1,89 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A boosting query can be used to effectively
+// demote results that match a given query.
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-boosting-query.html
+type BoostingQuery struct {
+ Query
+ positiveClause Query
+ negativeClause Query
+ negativeBoost *float64
+ boost *float64
+}
+
+// Creates a new boosting query.
+func NewBoostingQuery() BoostingQuery {
+ return BoostingQuery{}
+}
+
+func (q BoostingQuery) Positive(positive Query) BoostingQuery {
+ q.positiveClause = positive
+ return q
+}
+
+func (q BoostingQuery) Negative(negative Query) BoostingQuery {
+ q.negativeClause = negative
+ return q
+}
+
+func (q BoostingQuery) NegativeBoost(negativeBoost float64) BoostingQuery {
+ q.negativeBoost = &negativeBoost
+ return q
+}
+
+func (q BoostingQuery) Boost(boost float64) BoostingQuery {
+ q.boost = &boost
+ return q
+}
+
+// Creates the query source for the boosting query.
+func (q BoostingQuery) Source() interface{} {
+ // {
+ // "boosting" : {
+ // "positive" : {
+ // "term" : {
+ // "field1" : "value1"
+ // }
+ // },
+ // "negative" : {
+ // "term" : {
+ // "field2" : "value2"
+ // }
+ // },
+ // "negative_boost" : 0.2
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ boostingClause := make(map[string]interface{})
+ query["boosting"] = boostingClause
+
+ // Negative and positive clause as well as negative boost
+ // are mandatory in the Java client.
+
+ // positive
+ if q.positiveClause != nil {
+ boostingClause["positive"] = q.positiveClause.Source()
+ }
+
+ // negative
+ if q.negativeClause != nil {
+ boostingClause["negative"] = q.negativeClause.Source()
+ }
+
+ if q.negativeBoost != nil {
+ boostingClause["negative_boost"] = *q.negativeBoost
+ }
+
+ if q.boost != nil {
+ boostingClause["boost"] = *q.boost
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common.go
new file mode 100644
index 0000000..f15f868
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_common.go
@@ -0,0 +1,144 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The common terms query is a modern alternative to stopword removal
+// that improves the precision and recall of search results
+// (by taking stopwords into account) without sacrificing performance.
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/common-terms-query/
+type CommonQuery struct {
+ Query
+ name string
+ query string
+ cutoffFreq *float64
+ highFreq *float64
+ highFreqOp string
+ highFreqMinMatch interface{}
+ lowFreq *float64
+ lowFreqOp string
+ lowFreqMinMatch interface{}
+ analyzer string
+ boost *float64
+ disableCoords *bool
+}
+
+// Creates a new common query.
+func NewCommonQuery(name string, query string) CommonQuery {
+ q := CommonQuery{name: name, query: query}
+ return q
+}
+
+func (q *CommonQuery) CutoffFrequency(f float64) *CommonQuery {
+ q.cutoffFreq = &f
+ return q
+}
+
+func (q *CommonQuery) HighFreq(f float64) *CommonQuery {
+ q.highFreq = &f
+ return q
+}
+
+func (q *CommonQuery) HighFreqOperator(op string) *CommonQuery {
+ q.highFreqOp = op
+ return q
+}
+
+func (q *CommonQuery) HighFreqMinMatch(min interface{}) *CommonQuery {
+ q.highFreqMinMatch = min
+ return q
+}
+
+func (q *CommonQuery) LowFreq(f float64) *CommonQuery {
+ q.lowFreq = &f
+ return q
+}
+
+func (q *CommonQuery) LowFreqOperator(op string) *CommonQuery {
+ q.lowFreqOp = op
+ return q
+}
+
+func (q *CommonQuery) LowFreqMinMatch(min interface{}) *CommonQuery {
+ q.lowFreqMinMatch = min
+ return q
+}
+
+func (q *CommonQuery) Analyzer(analyzer string) *CommonQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q *CommonQuery) Boost(boost float64) *CommonQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q *CommonQuery) DisableCoords(disable bool) *CommonQuery {
+ q.disableCoords = &disable
+ return q
+}
+
+// Creates the query source for the common query.
+func (q CommonQuery) Source() interface{} {
+ // {
+ // "common": {
+ // "body": {
+ // "query": "this is bonsai cool",
+ // "cutoff_frequency": 0.001
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+ body := make(map[string]interface{})
+ query := make(map[string]interface{})
+
+ source["common"] = body
+ body[q.name] = query
+ query["query"] = q.query
+
+ if q.cutoffFreq != nil {
+ query["cutoff_frequency"] = *(q.cutoffFreq)
+ }
+
+ if q.highFreq != nil {
+ query["high_freq"] = *(q.highFreq)
+ }
+ if q.highFreqOp != "" {
+ query["high_freq_operator"] = q.highFreqOp
+ }
+
+ if q.lowFreq != nil {
+ query["low_freq"] = *(q.lowFreq)
+ }
+ if q.lowFreqOp != "" {
+ query["low_freq_operator"] = q.lowFreqOp
+ }
+
+ if q.lowFreqMinMatch != nil || q.highFreqMinMatch != nil {
+ mm := make(map[string]interface{})
+ if q.lowFreqMinMatch != nil {
+ mm["low_freq"] = q.lowFreqMinMatch
+ }
+ if q.highFreqMinMatch != nil {
+ mm["high_freq"] = q.highFreqMinMatch
+ }
+ query["minimum_should_match"] = mm
+ }
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+
+ if q.disableCoords != nil {
+ query["disable_coords"] = *(q.disableCoords)
+ }
+
+ if q.boost != nil {
+ query["boost"] = *(q.boost)
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_filters_score.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_filters_score.go
new file mode 100644
index 0000000..f0503a3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_filters_score.go
@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A custom_filters_score query allows you to execute a query and,
+// if a hit matches a provided filter (evaluated in order), use either
+// a boost or a script associated with that filter to compute the score.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/custom-filters-score-query/
+type CustomFiltersScoreQuery struct {
+ query Query
+ filters []Filter
+ scoreMode string
+ maxBoost *float32
+ script string
+}
+
+// Creates a new custom_filters_score query.
+func NewCustomFiltersScoreQuery() CustomFiltersScoreQuery {
+ q := CustomFiltersScoreQuery{
+ filters: make([]Filter, 0),
+ }
+ return q
+}
+
+func (q CustomFiltersScoreQuery) Query(query Query) CustomFiltersScoreQuery {
+ q.query = query
+ return q
+}
+
+func (q CustomFiltersScoreQuery) Filter(filter Filter) CustomFiltersScoreQuery {
+ q.filters = append(q.filters, filter)
+ return q
+}
+
+func (q CustomFiltersScoreQuery) ScoreMode(scoreMode string) CustomFiltersScoreQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+func (q CustomFiltersScoreQuery) MaxBoost(maxBoost float32) CustomFiltersScoreQuery {
+ q.maxBoost = &maxBoost
+ return q
+}
+
+func (q CustomFiltersScoreQuery) Script(script string) CustomFiltersScoreQuery {
+ q.script = script
+ return q
+}
+
+// Creates the query source for the custom_filters_score query.
+func (q CustomFiltersScoreQuery) Source() interface{} {
+ // {
+ // "custom_filters_score" : {
+ // "query" : {
+ // "match_all" : {}
+ // },
+ // "filters" : [
+ // {
+ // "filter" : { "range" : { "age" : {"from" : 0, "to" : 10} } },
+ // "boost" : "3"
+ // },
+ // {
+ // "filter" : { "range" : { "age" : {"from" : 10, "to" : 20} } },
+ // "boost" : "2"
+ // }
+ // ],
+ // "score_mode" : "first"
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ cfs := make(map[string]interface{})
+ query["custom_filters_score"] = cfs
+
+ // query
+ if q.query != nil {
+ cfs["query"] = q.query.Source()
+ }
+ // filters
+ clauses := make([]interface{}, 0)
+ for _, filter := range q.filters {
+ clauses = append(clauses, filter.Source())
+ }
+ cfs["filters"] = clauses
+
+ // scoreMode
+ if q.scoreMode != "" {
+ cfs["score_mode"] = q.scoreMode
+ }
+
+ // max_boost
+ if q.maxBoost != nil {
+ cfs["max_boost"] = *q.maxBoost
+ }
+
+ // script
+ if q.script != "" {
+ cfs["script"] = q.script
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_score.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_score.go
new file mode 100644
index 0000000..8eadfcb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_custom_score.go
@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A custom_score query allows you to wrap another query and customize
+// its scoring, optionally with a computation derived from other
+// (numeric) field values in the document using a script expression.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/custom-score-query/
+type CustomScoreQuery struct {
+ query Query
+ filter Filter
+ script string
+ lang string
+ boost *float32
+ params map[string]interface{}
+}
+
+// Creates a new custom_score query.
+func NewCustomScoreQuery() CustomScoreQuery {
+ q := CustomScoreQuery{
+ params: make(map[string]interface{}),
+ }
+ return q
+}
+
+func (q CustomScoreQuery) Query(query Query) CustomScoreQuery {
+ q.query = query
+ return q
+}
+
+func (q CustomScoreQuery) Filter(filter Filter) CustomScoreQuery {
+ q.filter = filter
+ return q
+}
+
+func (q CustomScoreQuery) Script(script string) CustomScoreQuery {
+ q.script = script
+ return q
+}
+
+func (q CustomScoreQuery) Lang(lang string) CustomScoreQuery {
+ q.lang = lang
+ return q
+}
+
+func (q CustomScoreQuery) Boost(boost float32) CustomScoreQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q CustomScoreQuery) Params(params map[string]interface{}) CustomScoreQuery {
+ q.params = params
+ return q
+}
+
+func (q CustomScoreQuery) Param(name string, value interface{}) CustomScoreQuery {
+ q.params[name] = value
+ return q
+}
+
+// Creates the query source for the custom_score query.
+func (q CustomScoreQuery) Source() interface{} {
+ // "custom_score" : {
+ // "query" : {
+ // ....
+ // },
+ // "params" : {
+ // "param1" : 2,
+ // "param2" : 3.1
+ // },
+ // "script" : "_score * doc['my_numeric_field'].value / pow(param1, param2)"
+ // }
+
+ query := make(map[string]interface{})
+
+ csq := make(map[string]interface{})
+ query["custom_score"] = csq
+
+ // query
+ if q.query != nil {
+ csq["query"] = q.query.Source()
+ } else if q.filter != nil {
+ csq["filter"] = q.filter.Source()
+ }
+
+ csq["script"] = q.script
+
+ // lang
+ if q.lang != "" {
+ csq["lang"] = q.lang
+ }
+
+ // params
+ if len(q.params) > 0 {
+ csq["params"] = q.params
+ }
+
+ // boost
+ if q.boost != nil {
+ csq["boost"] = *q.boost
+ }
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_dis_max.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_dis_max.go
new file mode 100644
index 0000000..76be783
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_dis_max.go
@@ -0,0 +1,83 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that generates the union of documents produced by its subqueries,
+// and that scores each document with the maximum score for that document
+// as produced by any subquery, plus a tie breaking increment for
+// any additional matching subqueries.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/dis-max-query/
+type DisMaxQuery struct {
+ queries []Query
+ boost *float32
+ tieBreaker *float32
+}
+
+// Creates a new dis_max query.
+func NewDisMaxQuery() DisMaxQuery {
+ q := DisMaxQuery{
+ queries: make([]Query, 0),
+ }
+ return q
+}
+
+func (q DisMaxQuery) Query(query Query) DisMaxQuery {
+ q.queries = append(q.queries, query)
+ return q
+}
+
+func (q DisMaxQuery) Boost(boost float32) DisMaxQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q DisMaxQuery) TieBreaker(tieBreaker float32) DisMaxQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+// Creates the query source for the dis_max query.
+func (q DisMaxQuery) Source() interface{} {
+ // {
+ // "dis_max" : {
+ // "tie_breaker" : 0.7,
+ // "boost" : 1.2,
+	//      "queries" : [
+ // {
+ // "term" : { "age" : 34 }
+ // },
+ // {
+ // "term" : { "age" : 35 }
+ // }
+ // ]
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ disMax := make(map[string]interface{})
+ query["dis_max"] = disMax
+
+ // tieBreaker
+ if q.tieBreaker != nil {
+ disMax["tie_breaker"] = *q.tieBreaker
+ }
+
+ // boost
+ if q.boost != nil {
+ disMax["boost"] = *q.boost
+ }
+
+ // queries
+ clauses := make([]interface{}, 0)
+ for _, subQuery := range q.queries {
+ clauses = append(clauses, subQuery.Source())
+ }
+ disMax["queries"] = clauses
+
+ return query
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_filtered.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_filtered.go
new file mode 100644
index 0000000..a58b20a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_filtered.go
@@ -0,0 +1,86 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that applies a filter to the results of another query.
+// For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/filtered-query.html
+type FilteredQuery struct {
+ Query
+ query Query
+ filters []Filter
+ boost *float32
+}
+
+// Creates a new filtered query.
+func NewFilteredQuery(query Query) FilteredQuery {
+ q := FilteredQuery{
+ query: query,
+ filters: make([]Filter, 0),
+ }
+ return q
+}
+
+func (q FilteredQuery) Filter(filter Filter) FilteredQuery {
+ q.filters = append(q.filters, filter)
+ return q
+}
+
+func (q FilteredQuery) Boost(boost float32) FilteredQuery {
+ q.boost = &boost
+ return q
+}
+
+// Creates the query source for the filtered query.
+func (q FilteredQuery) Source() interface{} {
+ // {
+ // "filtered" : {
+ // "query" : {
+ // "term" : { "tag" : "wow" }
+ // },
+ // "filter" : {
+ // "range" : {
+ // "age" : { "from" : 10, "to" : 20 }
+ // }
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ filtered := make(map[string]interface{})
+ source["filtered"] = filtered
+
+ filtered["query"] = q.query.Source()
+
+ if len(q.filters) == 1 {
+ filtered["filter"] = q.filters[0].Source()
+ } else if len(q.filters) > 1 {
+ filter := make(map[string]interface{})
+ filtered["filter"] = filter
+ and := make(map[string]interface{})
+ filter["and"] = and
+ filters := make([]interface{}, 0)
+ for _, f := range q.filters {
+ filters = append(filters, f.Source())
+ }
+ and["filters"] = filters
+ /*
+ anded := make([]map[string]interface{}, 0)
+ filtered["filter"] = anded
+ for _, f := range q.filters {
+ andElem := make(map[string]interface{})
+ andElem["and"] = f.Source()
+ anded = append(anded, andElem)
+ }
+ */
+ }
+
+ if q.boost != nil {
+ filtered["boost"] = *q.boost
+ }
+
+ return source
+}
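
With more than one filter, Source() above wraps them in an and filter. A sketch (the tag, age, and type values are hypothetical; NewTermQuery is defined elsewhere in this vendored tree):

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewFilteredQuery(elastic.NewTermQuery("tag", "wow")).
		Filter(elastic.NewRangeFilter("age").Gte(10).Lte(20)).
		Filter(elastic.NewTypeFilter("blog"))
	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"filtered":{"filter":{"and":{"filters":[...]}},"query":{"term":{"tag":"wow"}}}} with both filters in the array
}
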
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq.go
new file mode 100644
index 0000000..6f2f3e8
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq.go
@@ -0,0 +1,137 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The function_score query allows you to modify the score of documents that
+// are retrieved by a query. This can be useful if, for example,
+// a score function is computationally expensive and it is sufficient
+// to compute the score on a filtered set of documents.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+type FunctionScoreQuery struct {
+ query Query
+ filter Filter
+ boost *float32
+ maxBoost *float32
+ scoreMode string
+ boostMode string
+ filters []Filter
+ scoreFuncs []ScoreFunction
+ minScore *float32
+ weight *float64
+}
+
+// NewFunctionScoreQuery creates a new function score query.
+func NewFunctionScoreQuery() FunctionScoreQuery {
+ return FunctionScoreQuery{
+ filters: make([]Filter, 0),
+ scoreFuncs: make([]ScoreFunction, 0),
+ }
+}
+
+func (q FunctionScoreQuery) Query(query Query) FunctionScoreQuery {
+ q.query = query
+ q.filter = nil
+ return q
+}
+
+func (q FunctionScoreQuery) Filter(filter Filter) FunctionScoreQuery {
+ q.query = nil
+ q.filter = filter
+ return q
+}
+
+func (q FunctionScoreQuery) Add(filter Filter, scoreFunc ScoreFunction) FunctionScoreQuery {
+ q.filters = append(q.filters, filter)
+ q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
+ return q
+}
+
+func (q FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) FunctionScoreQuery {
+ q.filters = append(q.filters, nil)
+ q.scoreFuncs = append(q.scoreFuncs, scoreFunc)
+ return q
+}
+
+func (q FunctionScoreQuery) ScoreMode(scoreMode string) FunctionScoreQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+func (q FunctionScoreQuery) BoostMode(boostMode string) FunctionScoreQuery {
+ q.boostMode = boostMode
+ return q
+}
+
+func (q FunctionScoreQuery) MaxBoost(maxBoost float32) FunctionScoreQuery {
+ q.maxBoost = &maxBoost
+ return q
+}
+
+func (q FunctionScoreQuery) Boost(boost float32) FunctionScoreQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q FunctionScoreQuery) MinScore(minScore float32) FunctionScoreQuery {
+ q.minScore = &minScore
+ return q
+}
+
+// Source returns JSON for the function score query.
+func (q FunctionScoreQuery) Source() interface{} {
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["function_score"] = query
+
+ if q.query != nil {
+ query["query"] = q.query.Source()
+ } else if q.filter != nil {
+ query["filter"] = q.filter.Source()
+ }
+
+ if len(q.filters) == 1 && q.filters[0] == nil {
+ // Weight needs to be serialized on this level.
+ if weight := q.scoreFuncs[0].GetWeight(); weight != nil {
+ query["weight"] = weight
+ }
+ // Serialize the score function
+ query[q.scoreFuncs[0].Name()] = q.scoreFuncs[0].Source()
+ } else {
+ funcs := make([]interface{}, len(q.filters))
+ for i, filter := range q.filters {
+ hsh := make(map[string]interface{})
+ if filter != nil {
+ hsh["filter"] = filter.Source()
+ }
+ // Weight needs to be serialized on this level.
+ if weight := q.scoreFuncs[i].GetWeight(); weight != nil {
+ hsh["weight"] = weight
+ }
+ // Serialize the score function
+ hsh[q.scoreFuncs[i].Name()] = q.scoreFuncs[i].Source()
+ funcs[i] = hsh
+ }
+ query["functions"] = funcs
+ }
+
+ if q.scoreMode != "" {
+ query["score_mode"] = q.scoreMode
+ }
+ if q.boostMode != "" {
+ query["boost_mode"] = q.boostMode
+ }
+ if q.maxBoost != nil {
+ query["max_boost"] = *q.maxBoost
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.minScore != nil {
+ query["min_score"] = *q.minScore
+ }
+
+ return source
+}
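
A sketch combining a query with a gauss decay function (defined below); since only one unfiltered function is added, its weight and body are serialized at the top level of function_score rather than under "functions", per the single-function branch above. The field and values are hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v2"
)

func main() {
	// Decay the score of older documents around a reference date.
	decay := elastic.NewGaussDecayFunction().
		FieldName("created").
		Origin("2015-01-01").
		Scale("10d").
		Weight(2)
	q := elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchAllQuery()).
		AddScoreFunc(decay).
		BoostMode("multiply")
	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"function_score":{"boost_mode":"multiply","gauss":{"created":{"origin":"2015-01-01","scale":"10d"}},"query":{"match_all":{}},"weight":2}}
}
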
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_score_funcs.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_score_funcs.go
new file mode 100644
index 0000000..5fde765
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fsq_score_funcs.go
@@ -0,0 +1,627 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "strings"
+)
+
+// ScoreFunction is used in combination with the Function Score Query.
+type ScoreFunction interface {
+ Name() string
+ GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery
+ Source() interface{}
+}
+
+// -- Exponential Decay --
+
+// ExponentialDecayFunction builds an exponential decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type ExponentialDecayFunction struct {
+ fieldName string
+ origin interface{}
+ scale interface{}
+ decay *float64
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewExponentialDecayFunction creates a new ExponentialDecayFunction.
+func NewExponentialDecayFunction() ExponentialDecayFunction {
+ return ExponentialDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn ExponentialDecayFunction) Name() string {
+ return "exp"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn ExponentialDecayFunction) FieldName(fieldName string) ExponentialDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn ExponentialDecayFunction) Origin(origin interface{}) ExponentialDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn ExponentialDecayFunction) Scale(scale interface{}) ExponentialDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn ExponentialDecayFunction) Decay(decay float64) ExponentialDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn ExponentialDecayFunction) Offset(offset interface{}) ExponentialDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn ExponentialDecayFunction) Weight(weight float64) ExponentialDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the adjusted score. It is part of the ScoreFunction interface.
+// Returns nil if weight is not specified.
+func (fn ExponentialDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn ExponentialDecayFunction) MultiValueMode(mode string) ExponentialDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn ExponentialDecayFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale
+ if fn.decay != nil && *fn.decay > 0 {
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode
+ }
+ return source
+}
+
+// -- Gauss Decay --
+
+// GaussDecayFunction builds a gauss decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type GaussDecayFunction struct {
+ fieldName string
+ origin interface{}
+ scale interface{}
+ decay *float64
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewGaussDecayFunction returns a new GaussDecayFunction.
+func NewGaussDecayFunction() GaussDecayFunction {
+ return GaussDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn GaussDecayFunction) Name() string {
+ return "gauss"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn GaussDecayFunction) FieldName(fieldName string) GaussDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn GaussDecayFunction) Origin(origin interface{}) GaussDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn GaussDecayFunction) Scale(scale interface{}) GaussDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn GaussDecayFunction) Decay(decay float64) GaussDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn GaussDecayFunction) Offset(offset interface{}) GaussDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn GaussDecayFunction) Weight(weight float64) GaussDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight by which the score is adjusted. It is part
+// of the ScoreFunction interface. Returns nil if no weight is specified.
+func (fn GaussDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn GaussDecayFunction) MultiValueMode(mode string) GaussDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn GaussDecayFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale
+ if fn.decay != nil && *fn.decay > 0 {
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
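+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; it assumes a date field named
+// "created" exists in the index):
+//
+//   fn := NewGaussDecayFunction().
+//     FieldName("created").
+//     Origin("2015-01-01").
+//     Scale("10d").
+//     Decay(0.5)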
+
+// -- Linear Decay --
+
+// LinearDecayFunction builds a linear decay score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type LinearDecayFunction struct {
+ fieldName string
+ origin interface{}
+ scale interface{}
+ decay *float64
+ offset interface{}
+ multiValueMode string
+ weight *float64
+}
+
+// NewLinearDecayFunction initializes and returns a new LinearDecayFunction.
+func NewLinearDecayFunction() LinearDecayFunction {
+ return LinearDecayFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn LinearDecayFunction) Name() string {
+ return "linear"
+}
+
+// FieldName specifies the name of the field to which this decay function is applied.
+func (fn LinearDecayFunction) FieldName(fieldName string) LinearDecayFunction {
+ fn.fieldName = fieldName
+ return fn
+}
+
+// Origin defines the "central point" by which the decay function calculates
+// "distance".
+func (fn LinearDecayFunction) Origin(origin interface{}) LinearDecayFunction {
+ fn.origin = origin
+ return fn
+}
+
+// Scale defines the scale to be used with Decay.
+func (fn LinearDecayFunction) Scale(scale interface{}) LinearDecayFunction {
+ fn.scale = scale
+ return fn
+}
+
+// Decay defines how documents are scored at the distance given a Scale.
+// If no decay is defined, documents at the distance Scale will be scored 0.5.
+func (fn LinearDecayFunction) Decay(decay float64) LinearDecayFunction {
+ fn.decay = &decay
+ return fn
+}
+
+// Offset, if defined, computes the decay function only for a distance
+// greater than the defined offset.
+func (fn LinearDecayFunction) Offset(offset interface{}) LinearDecayFunction {
+ fn.offset = offset
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn LinearDecayFunction) Weight(weight float64) LinearDecayFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight by which the score is adjusted. It is part
+// of the ScoreFunction interface. Returns nil if no weight is specified.
+func (fn LinearDecayFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// MultiValueMode specifies how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn LinearDecayFunction) MultiValueMode(mode string) LinearDecayFunction {
+ fn.multiValueMode = mode
+ return fn
+}
+
+// GetMultiValueMode returns how the decay function should be calculated
+// on a field that has multiple values.
+// Valid modes are: min, max, avg, and sum.
+func (fn LinearDecayFunction) GetMultiValueMode() string {
+ return fn.multiValueMode
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn LinearDecayFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source[fn.fieldName] = params
+ if fn.origin != nil {
+ params["origin"] = fn.origin
+ }
+ params["scale"] = fn.scale
+ if fn.decay != nil && *fn.decay > 0 {
+ params["decay"] = *fn.decay
+ }
+ if fn.offset != nil {
+ params["offset"] = fn.offset
+ }
+ if fn.multiValueMode != "" {
+ source["multi_value_mode"] = fn.multiValueMode
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
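+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; it assumes a numeric field named
+// "price" exists in the index):
+//
+//   fn := NewLinearDecayFunction().
+//     FieldName("price").
+//     Origin(100).
+//     Scale(50).
+//     MultiValueMode("avg")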
+
+// -- Script --
+
+// ScriptFunction builds a script score function. It uses a script to
+// compute or influence the score of documents that match the inner
+// query or filter.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_script_score
+// for details.
+type ScriptFunction struct {
+ script string
+ lang string
+ params map[string]interface{}
+ weight *float64
+}
+
+// NewScriptFunction initializes and returns a new ScriptFunction.
+func NewScriptFunction(script string) ScriptFunction {
+ return ScriptFunction{
+ script: script,
+ params: make(map[string]interface{}),
+ }
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn ScriptFunction) Name() string {
+ return "script_score"
+}
+
+// Script specifies the script to be executed.
+func (fn ScriptFunction) Script(script string) ScriptFunction {
+ fn.script = script
+ return fn
+}
+
+// Lang specifies the language of the Script.
+func (fn ScriptFunction) Lang(lang string) ScriptFunction {
+ fn.lang = lang
+ return fn
+}
+
+// Param adds a single parameter to the script.
+func (fn ScriptFunction) Param(name string, value interface{}) ScriptFunction {
+ fn.params[name] = value
+ return fn
+}
+
+// Params sets all script parameters in a single step.
+func (fn ScriptFunction) Params(params map[string]interface{}) ScriptFunction {
+ fn.params = params
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn ScriptFunction) Weight(weight float64) ScriptFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight by which the score is adjusted. It is part
+// of the ScoreFunction interface. Returns nil if no weight is specified.
+func (fn ScriptFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn ScriptFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ if fn.script != "" {
+ source["script"] = fn.script
+ }
+ if fn.lang != "" {
+ source["lang"] = fn.lang
+ }
+ if len(fn.params) > 0 {
+ source["params"] = fn.params
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
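+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; the script language and the
+// "popularity" field are assumptions):
+//
+//   fn := NewScriptFunction("_score * doc['popularity'].value * factor").
+//     Lang("groovy").
+//     Param("factor", 1.2)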
+
+// -- Factor --
+
+// FactorFunction is deprecated.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html
+// for details.
+type FactorFunction struct {
+ boostFactor *float32
+}
+
+// NewFactorFunction initializes and returns a new FactorFunction.
+func NewFactorFunction() FactorFunction {
+ return FactorFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn FactorFunction) Name() string {
+ return "boost_factor"
+}
+
+// BoostFactor specifies a boost for this score function.
+func (fn FactorFunction) BoostFactor(boost float32) FactorFunction {
+ fn.boostFactor = &boost
+ return fn
+}
+
+// GetWeight always returns nil for (deprecated) FactorFunction.
+func (fn FactorFunction) GetWeight() *float64 {
+ return nil
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn FactorFunction) Source() interface{} {
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return fn.boostFactor
+}
+
+// -- Field value factor --
+
+// FieldValueFactorFunction is a function score function that allows you
+// to use a field from a document to influence the score.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_field_value_factor.
+type FieldValueFactorFunction struct {
+ field string
+ factor *float64
+ missing *float64
+ weight *float64
+ modifier string
+}
+
+// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction.
+func NewFieldValueFactorFunction() FieldValueFactorFunction {
+ return FieldValueFactorFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn FieldValueFactorFunction) Name() string {
+ return "field_value_factor"
+}
+
+// Field is the field to be extracted from the document.
+func (fn FieldValueFactorFunction) Field(field string) FieldValueFactorFunction {
+ fn.field = field
+ return fn
+}
+
+// Factor is the (optional) factor to multiply the field with. If you do not
+// specify a factor, the default is 1.
+func (fn FieldValueFactorFunction) Factor(factor float64) FieldValueFactorFunction {
+ fn.factor = &factor
+ return fn
+}
+
+// Modifier to apply to the field value. It can be one of: none, log, log1p,
+// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none.
+func (fn FieldValueFactorFunction) Modifier(modifier string) FieldValueFactorFunction {
+ fn.modifier = modifier
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn FieldValueFactorFunction) Weight(weight float64) FieldValueFactorFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight by which the score is adjusted. It is part
+// of the ScoreFunction interface. Returns nil if no weight is specified.
+func (fn FieldValueFactorFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Missing is the value to be used if a document does not have the field.
+func (fn FieldValueFactorFunction) Missing(missing float64) FieldValueFactorFunction {
+ fn.missing = &missing
+ return fn
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn FieldValueFactorFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ if fn.field != "" {
+ source["field"] = fn.field
+ }
+ if fn.factor != nil {
+ source["factor"] = *fn.factor
+ }
+ if fn.missing != nil {
+ source["missing"] = *fn.missing
+ }
+ if fn.modifier != "" {
+ source["modifier"] = strings.ToLower(fn.modifier)
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
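+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; the "popularity" field is an
+// assumption):
+//
+//   fn := NewFieldValueFactorFunction().
+//     Field("popularity").
+//     Factor(1.2).
+//     Modifier("sqrt").
+//     Missing(1)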
+
+// -- Weight Factor --
+
+// WeightFactorFunction builds a weight factor function that multiplies
+// the weight to the score.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_weight
+// for details.
+type WeightFactorFunction struct {
+ weight float64
+}
+
+// NewWeightFactorFunction initializes and returns a new WeightFactorFunction.
+func NewWeightFactorFunction(weight float64) WeightFactorFunction {
+ return WeightFactorFunction{weight: weight}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn WeightFactorFunction) Name() string {
+ return "weight"
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn WeightFactorFunction) Weight(weight float64) WeightFactorFunction {
+ fn.weight = weight
+ return fn
+}
+
+// GetWeight returns the weight by which the score is adjusted. It is part
+// of the ScoreFunction interface. It never returns nil for WeightFactorFunction.
+func (fn WeightFactorFunction) GetWeight() *float64 {
+ return &fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn WeightFactorFunction) Source() interface{} {
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return fn.weight
+}
+
+// -- Random --
+
+// RandomFunction builds a random score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_random
+// for details.
+type RandomFunction struct {
+ seed interface{}
+ weight *float64
+}
+
+// NewRandomFunction initializes and returns a new RandomFunction.
+func NewRandomFunction() RandomFunction {
+ return RandomFunction{}
+}
+
+// Name represents the JSON field name under which the output of Source
+// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source).
+func (fn RandomFunction) Name() string {
+ return "random_score"
+}
+
+// Seed is documented in 1.6 as a numeric value. However, in the source code
+// of the Java client, it also accepts strings. So we accept both here, too.
+func (fn RandomFunction) Seed(seed interface{}) RandomFunction {
+ fn.seed = seed
+ return fn
+}
+
+// Weight adjusts the score of the score function.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_using_function_score
+// for details.
+func (fn RandomFunction) Weight(weight float64) RandomFunction {
+ fn.weight = &weight
+ return fn
+}
+
+// GetWeight returns the weight by which the score is adjusted. It is part
+// of the ScoreFunction interface. Returns nil if no weight is specified.
+func (fn RandomFunction) GetWeight() *float64 {
+ return fn.weight
+}
+
+// Source returns the serializable JSON data of this score function.
+func (fn RandomFunction) Source() interface{} {
+ source := make(map[string]interface{})
+ if fn.seed != nil {
+ source["seed"] = fn.seed
+ }
+ // Notice that the weight has to be serialized in FunctionScoreQuery.
+ return source
+}
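+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source):
+//
+//   fn := NewRandomFunction().Seed(42).Weight(1.5)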
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy.go
new file mode 100644
index 0000000..22d83bb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy.go
@@ -0,0 +1,117 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyQuery uses similarity based on Levenshtein edit distance for
+// string fields, and a +/- margin on numeric and date fields.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html
+type FuzzyQuery struct {
+ Query
+
+ name string
+ value interface{}
+ boost float32
+ fuzziness interface{}
+ prefixLength *int
+ maxExpansions *int
+ transpositions *bool
+ queryName string
+}
+
+// NewFuzzyQuery creates a new fuzzy query.
+func NewFuzzyQuery() FuzzyQuery {
+ q := FuzzyQuery{
+ boost: -1.0,
+ }
+ return q
+}
+
+func (q FuzzyQuery) Name(name string) FuzzyQuery {
+ q.name = name
+ return q
+}
+
+func (q FuzzyQuery) Value(value interface{}) FuzzyQuery {
+ q.value = value
+ return q
+}
+
+func (q FuzzyQuery) Boost(boost float32) FuzzyQuery {
+ q.boost = boost
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings like "auto",
+// "0..1", "1..4" or "0.0..1.0".
+func (q FuzzyQuery) Fuzziness(fuzziness interface{}) FuzzyQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyQuery) PrefixLength(prefixLength int) FuzzyQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyQuery) MaxExpansions(maxExpansions int) FuzzyQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q FuzzyQuery) Transpositions(transpositions bool) FuzzyQuery {
+ q.transpositions = &transpositions
+ return q
+}
+
+func (q FuzzyQuery) QueryName(queryName string) FuzzyQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the fuzzy query.
+func (q FuzzyQuery) Source() interface{} {
+ // {
+ // "fuzzy" : {
+ // "user" : {
+ // "value" : "ki",
+ // "boost" : 1.0,
+ // "fuzziness" : 2,
+ // "prefix_length" : 0,
+ // "max_expansions" : 100
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["fuzzy"] = query
+
+ fq := make(map[string]interface{})
+ query[q.name] = fq
+
+ fq["value"] = q.value
+
+ if q.boost != -1.0 {
+ fq["boost"] = q.boost
+ }
+ if q.transpositions != nil {
+ fq["transpositions"] = *q.transpositions
+ }
+ if q.fuzziness != nil {
+ fq["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ fq["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ fq["max_expansions"] = *q.maxExpansions
+ }
+ if q.queryName != "" {
+ fq["_name"] = q.queryName
+ }
+
+ return source
+}
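+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; it builds the JSON shown in the
+// comment on Source above):
+//
+//   q := NewFuzzyQuery().
+//     Name("user").
+//     Value("ki").
+//     Fuzziness(2).
+//     PrefixLength(0).
+//     MaxExpansions(100)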
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this.go
new file mode 100644
index 0000000..90a837d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this.go
@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyLikeThisQuery finds documents that are "like" the provided text by
+// running it against one or more fields.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-flt-query.html
+type FuzzyLikeThisQuery struct {
+ Query
+
+ fields []string
+ boost *float32
+ likeText *string
+ fuzziness interface{}
+ prefixLength *int
+ maxQueryTerms *int
+ ignoreTF *bool
+ analyzer string
+ failOnUnsupportedField *bool
+ queryName string
+}
+
+// NewFuzzyLikeThisQuery creates a new fuzzy_like_this query.
+func NewFuzzyLikeThisQuery() FuzzyLikeThisQuery {
+ q := FuzzyLikeThisQuery{
+ fields: make([]string, 0),
+ }
+ return q
+}
+
+func (q FuzzyLikeThisQuery) Field(field string) FuzzyLikeThisQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q FuzzyLikeThisQuery) Fields(fields ...string) FuzzyLikeThisQuery {
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+func (q FuzzyLikeThisQuery) LikeText(likeText string) FuzzyLikeThisQuery {
+ q.likeText = &likeText
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings like "auto",
+// "0..1", "1..4" or "0.0..1.0".
+func (q FuzzyLikeThisQuery) Fuzziness(fuzziness interface{}) FuzzyLikeThisQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyLikeThisQuery) PrefixLength(prefixLength int) FuzzyLikeThisQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyLikeThisQuery) MaxQueryTerms(maxQueryTerms int) FuzzyLikeThisQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+func (q FuzzyLikeThisQuery) IgnoreTF(ignoreTF bool) FuzzyLikeThisQuery {
+ q.ignoreTF = &ignoreTF
+ return q
+}
+
+func (q FuzzyLikeThisQuery) Analyzer(analyzer string) FuzzyLikeThisQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q FuzzyLikeThisQuery) Boost(boost float32) FuzzyLikeThisQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q FuzzyLikeThisQuery) FailOnUnsupportedField(fail bool) FuzzyLikeThisQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+func (q FuzzyLikeThisQuery) QueryName(queryName string) FuzzyLikeThisQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the fuzzy_like_this query.
+func (q FuzzyLikeThisQuery) Source() interface{} {
+ // {
+ // "fuzzy_like_this" : {
+ // "fields" : ["name.first", "name.last"],
+ // "like_text" : "text like this one",
+ // "max_query_terms" : 12
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["fuzzy_like_this"] = query
+
+ if len(q.fields) > 0 {
+ query["fields"] = q.fields
+ }
+ query["like_text"] = q.likeText
+
+ if q.maxQueryTerms != nil {
+ query["max_query_terms"] = *q.maxQueryTerms
+ }
+ if q.fuzziness != nil {
+ query["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ query["prefix_length"] = *q.prefixLength
+ }
+ if q.ignoreTF != nil {
+ query["ignore_tf"] = *q.ignoreTF
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.failOnUnsupportedField != nil {
+ query["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source
+}
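+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; it mirrors the JSON shown in the
+// comment on Source above):
+//
+//   q := NewFuzzyLikeThisQuery().
+//     Fields("name.first", "name.last").
+//     LikeText("text like this one").
+//     MaxQueryTerms(12)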
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query.go
new file mode 100644
index 0000000..eb0b531
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_fuzzy_like_this_field_query.go
@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyLikeThisFieldQuery is the same as the fuzzy_like_this query,
+// except that it runs against a single field. It provides a nicer query DSL
+// than the generic fuzzy_like_this query and supports typed fields
+// (automatically wrapping typed fields with a type filter to match only on
+// the specific type).
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-flt-field-query.html
+type FuzzyLikeThisFieldQuery struct {
+ Query
+
+ field string
+ boost *float32
+ likeText *string
+ fuzziness interface{}
+ prefixLength *int
+ maxQueryTerms *int
+ ignoreTF *bool
+ analyzer string
+ failOnUnsupportedField *bool
+ queryName string
+}
+
+// NewFuzzyLikeThisFieldQuery creates a new fuzzy like this field query.
+func NewFuzzyLikeThisFieldQuery(field string) FuzzyLikeThisFieldQuery {
+ q := FuzzyLikeThisFieldQuery{
+ field: field,
+ }
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) LikeText(likeText string) FuzzyLikeThisFieldQuery {
+ q.likeText = &likeText
+ return q
+}
+
+// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings like "auto",
+// "0..1", "1..4" or "0.0..1.0".
+func (q FuzzyLikeThisFieldQuery) Fuzziness(fuzziness interface{}) FuzzyLikeThisFieldQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) PrefixLength(prefixLength int) FuzzyLikeThisFieldQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) MaxQueryTerms(maxQueryTerms int) FuzzyLikeThisFieldQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) IgnoreTF(ignoreTF bool) FuzzyLikeThisFieldQuery {
+ q.ignoreTF = &ignoreTF
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) Analyzer(analyzer string) FuzzyLikeThisFieldQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) Boost(boost float32) FuzzyLikeThisFieldQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) FailOnUnsupportedField(fail bool) FuzzyLikeThisFieldQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+func (q FuzzyLikeThisFieldQuery) QueryName(queryName string) FuzzyLikeThisFieldQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the fuzzy_like_this_field query.
+func (q FuzzyLikeThisFieldQuery) Source() interface{} {
+ // {
+ // "fuzzy_like_this_field" : {
+ // "name.first": {
+ // "like_text" : "text like this one",
+ // "max_query_terms" : 12
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["fuzzy_like_this_field"] = query
+ fq := make(map[string]interface{})
+ query[q.field] = fq
+
+ fq["like_text"] = q.likeText
+
+ if q.maxQueryTerms != nil {
+ fq["max_query_terms"] = *q.maxQueryTerms
+ }
+ if q.fuzziness != nil {
+ fq["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ fq["prefix_length"] = *q.prefixLength
+ }
+ if q.ignoreTF != nil {
+ fq["ignore_tf"] = *q.ignoreTF
+ }
+ if q.boost != nil {
+ fq["boost"] = *q.boost
+ }
+ if q.analyzer != "" {
+ fq["analyzer"] = q.analyzer
+ }
+ if q.failOnUnsupportedField != nil {
+ fq["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+ }
+ if q.queryName != "" {
+ fq["_name"] = q.queryName
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child.go
new file mode 100644
index 0000000..17bcb56
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_child.go
@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_child query works the same as the has_child filter,
+// by automatically wrapping the filter with a constant_score
+// (when using the default score type).
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-child-query.html
+type HasChildQuery struct {
+ query Query
+ childType string
+ boost *float32
+ scoreType string
+ minChildren *int
+ maxChildren *int
+ shortCircuitCutoff *int
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasChildQuery creates a new has_child query.
+func NewHasChildQuery(childType string, query Query) HasChildQuery {
+ q := HasChildQuery{
+ query: query,
+ childType: childType,
+ }
+ return q
+}
+
+func (q HasChildQuery) Boost(boost float32) HasChildQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q HasChildQuery) ScoreType(scoreType string) HasChildQuery {
+ q.scoreType = scoreType
+ return q
+}
+
+func (q HasChildQuery) MinChildren(minChildren int) HasChildQuery {
+ q.minChildren = &minChildren
+ return q
+}
+
+func (q HasChildQuery) MaxChildren(maxChildren int) HasChildQuery {
+ q.maxChildren = &maxChildren
+ return q
+}
+
+func (q HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) HasChildQuery {
+ q.shortCircuitCutoff = &shortCircuitCutoff
+ return q
+}
+
+func (q HasChildQuery) QueryName(queryName string) HasChildQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q HasChildQuery) InnerHit(innerHit *InnerHit) HasChildQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Creates the query source for the has_child query.
+func (q HasChildQuery) Source() interface{} {
+ // {
+ // "has_child" : {
+ // "type" : "blog_tag",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["has_child"] = query
+
+ query["query"] = q.query.Source()
+ query["type"] = q.childType
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.scoreType != "" {
+ query["score_type"] = q.scoreType
+ }
+ if q.minChildren != nil {
+ query["min_children"] = *q.minChildren
+ }
+ if q.maxChildren != nil {
+ query["max_children"] = *q.maxChildren
+ }
+ if q.shortCircuitCutoff != nil {
+ query["short_circuit_cutoff"] = *q.shortCircuitCutoff
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ query["inner_hits"] = q.innerHit.Source()
+ }
+ return source
+}
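+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; NewTermQuery is assumed to be the
+// term query constructor from this package):
+//
+//   q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).
+//     ScoreType("max").
+//     MinChildren(1)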
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent.go
new file mode 100644
index 0000000..ff22acd
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_has_parent.go
@@ -0,0 +1,83 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The has_parent query works the same as the has_parent filter,
+// by automatically wrapping the filter with a
+// constant_score (when using the default score type).
+// It has the same syntax as the has_parent filter.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-has-parent-query.html
+type HasParentQuery struct {
+ query Query
+ parentType string
+ boost *float32
+ scoreType string
+ queryName string
+ innerHit *InnerHit
+}
+
+// NewHasParentQuery creates a new has_parent query.
+func NewHasParentQuery(parentType string, query Query) HasParentQuery {
+ q := HasParentQuery{
+ query: query,
+ parentType: parentType,
+ }
+ return q
+}
+
+func (q HasParentQuery) Boost(boost float32) HasParentQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q HasParentQuery) ScoreType(scoreType string) HasParentQuery {
+ q.scoreType = scoreType
+ return q
+}
+
+func (q HasParentQuery) QueryName(queryName string) HasParentQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q HasParentQuery) InnerHit(innerHit *InnerHit) HasParentQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Creates the query source for the has_parent query.
+func (q HasParentQuery) Source() interface{} {
+ // {
+ // "has_parent" : {
+ // "parent_type" : "blog",
+ // "query" : {
+ // "term" : {
+ // "tag" : "something"
+ // }
+ // }
+ // }
+ // }
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["has_parent"] = query
+
+ query["query"] = q.query.Source()
+ query["parent_type"] = q.parentType
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.scoreType != "" {
+ query["score_type"] = q.scoreType
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ query["inner_hits"] = q.innerHit.Source()
+ }
+ return source
+}
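+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; NewTermQuery is assumed to be the
+// term query constructor from this package):
+//
+//   q := NewHasParentQuery("blog", NewTermQuery("tag", "something")).
+//     ScoreType("score")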
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids.go
new file mode 100644
index 0000000..9a01a04
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_ids.go
@@ -0,0 +1,77 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Filters documents to only those with the provided ids.
+// Note, this filter does not require the _id field to be indexed
+// since it works using the _uid field.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-ids-query.html
+type IdsQuery struct {
+ Query
+ types []string
+ values []string
+ boost float32
+ queryName string
+}
+
+// NewIdsQuery creates a new ids query.
+func NewIdsQuery(types ...string) IdsQuery {
+ q := IdsQuery{
+ types: types,
+ values: make([]string, 0),
+ boost: -1.0,
+ }
+ return q
+}
+
+func (q IdsQuery) Ids(ids ...string) IdsQuery {
+ q.values = append(q.values, ids...)
+ return q
+}
+
+func (q IdsQuery) Boost(boost float32) IdsQuery {
+ q.boost = boost
+ return q
+}
+
+func (q IdsQuery) QueryName(queryName string) IdsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the ids query.
+func (q IdsQuery) Source() interface{} {
+ // {
+ // "ids" : {
+ // "type" : "my_type",
+ // "values" : ["1", "4", "100"]
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["ids"] = query
+
+ // type(s)
+ if len(q.types) == 1 {
+ query["type"] = q.types[0]
+ } else if len(q.types) > 1 {
+ query["types"] = q.types
+ }
+
+ // values
+ query["values"] = q.values
+
+ if q.boost != -1.0 {
+ query["boost"] = q.boost
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source
+}
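+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; it builds the JSON shown in the
+// comment on Source above):
+//
+//   q := NewIdsQuery("my_type").Ids("1", "4", "100")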
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match.go
new file mode 100644
index 0000000..04d34f6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match.go
@@ -0,0 +1,198 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MatchQuery is a family of queries that accepts text/numerics/dates,
+// analyzes them, and constructs a query out of the result. For more details,
+// see http://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html.
+//
+// To create a new MatchQuery, use NewMatchQuery. To create specific types
+// of queries, e.g. a match_phrase query, use NewMatchQuery(...).Type("phrase"),
+// or use one of the shortcuts like NewMatchPhraseQuery(...).
+type MatchQuery struct {
+ Query
+ name string
+ value interface{}
+ matchQueryType string // boolean, phrase, phrase_prefix
+ operator string // or / and
+ analyzer string
+ boost *float32
+ slop *int
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ rewrite string
+ fuzzyRewrite string
+ lenient *bool
+ fuzzyTranspositions *bool
+ zeroTermsQuery string
+ cutoffFrequency *float32
+ queryName string
+}
+
+// NewMatchQuery creates a new MatchQuery.
+func NewMatchQuery(name string, value interface{}) MatchQuery {
+ q := MatchQuery{name: name, value: value}
+ return q
+}
+
+// NewMatchPhraseQuery creates a new MatchQuery with type phrase.
+func NewMatchPhraseQuery(name string, value interface{}) MatchQuery {
+ q := MatchQuery{name: name, value: value, matchQueryType: "phrase"}
+ return q
+}
+
+// NewMatchPhrasePrefixQuery creates a new MatchQuery with type phrase_prefix.
+func NewMatchPhrasePrefixQuery(name string, value interface{}) MatchQuery {
+ q := MatchQuery{name: name, value: value, matchQueryType: "phrase_prefix"}
+ return q
+}
+
+// Type can be "boolean", "phrase", or "phrase_prefix".
+func (q MatchQuery) Type(matchQueryType string) MatchQuery {
+ q.matchQueryType = matchQueryType
+ return q
+}
+
+func (q MatchQuery) Operator(operator string) MatchQuery {
+ q.operator = operator
+ return q
+}
+
+func (q MatchQuery) Analyzer(analyzer string) MatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q MatchQuery) Boost(boost float32) MatchQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q MatchQuery) Slop(slop int) MatchQuery {
+ q.slop = &slop
+ return q
+}
+
+func (q MatchQuery) Fuzziness(fuzziness string) MatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q MatchQuery) PrefixLength(prefixLength int) MatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q MatchQuery) MaxExpansions(maxExpansions int) MatchQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q MatchQuery) MinimumShouldMatch(minimumShouldMatch string) MatchQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q MatchQuery) Rewrite(rewrite string) MatchQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q MatchQuery) FuzzyRewrite(fuzzyRewrite string) MatchQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+func (q MatchQuery) Lenient(lenient bool) MatchQuery {
+ q.lenient = &lenient
+ return q
+}
+
+func (q MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) MatchQuery {
+ q.fuzzyTranspositions = &fuzzyTranspositions
+ return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q MatchQuery) ZeroTermsQuery(zeroTermsQuery string) MatchQuery {
+ q.zeroTermsQuery = zeroTermsQuery
+ return q
+}
+
+func (q MatchQuery) CutoffFrequency(cutoff float32) MatchQuery {
+ q.cutoffFrequency = &cutoff
+ return q
+}
+
+func (q MatchQuery) QueryName(queryName string) MatchQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q MatchQuery) Source() interface{} {
+ // {"match":{"name":{"query":"value","type":"boolean/phrase"}}}
+ source := make(map[string]interface{})
+
+ match := make(map[string]interface{})
+ source["match"] = match
+
+ query := make(map[string]interface{})
+ match[q.name] = query
+
+ query["query"] = q.value
+
+ if q.matchQueryType != "" {
+ query["type"] = q.matchQueryType
+ }
+ if q.operator != "" {
+ query["operator"] = q.operator
+ }
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+ if q.slop != nil {
+ query["slop"] = *q.slop
+ }
+ if q.fuzziness != "" {
+ query["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ query["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ query["max_expansions"] = *q.maxExpansions
+ }
+ if q.minimumShouldMatch != "" {
+ query["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.rewrite != "" {
+ query["rewrite"] = q.rewrite
+ }
+ if q.fuzzyRewrite != "" {
+ query["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+ if q.lenient != nil {
+ query["lenient"] = *q.lenient
+ }
+ if q.fuzzyTranspositions != nil {
+ query["fuzzy_transpositions"] = *q.fuzzyTranspositions
+ }
+ if q.zeroTermsQuery != "" {
+ query["zero_terms_query"] = q.zeroTermsQuery
+ }
+ if q.cutoffFrequency != nil {
+ query["cutoff_frequency"] = q.cutoffFrequency
+ }
+ if q.queryName != "" {
+ query["_name"] = q.queryName
+ }
+
+ return source
+}
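+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; the "message" field is an assumption):
+//
+//   q := NewMatchQuery("message", "this is a test").
+//     Operator("and").
+//     MinimumShouldMatch("75%")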
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all.go
new file mode 100644
index 0000000..d2ba3eb
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_match_all.go
@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that matches all documents. Maps to Lucene MatchAllDocsQuery.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-match-all-query.html
+type MatchAllQuery struct {
+ Query
+ normsField string
+ boost *float32
+}
+
+// NewMatchAllQuery creates a new match all query.
+func NewMatchAllQuery() MatchAllQuery {
+ q := MatchAllQuery{}
+ return q
+}
+
+func (q MatchAllQuery) NormsField(normsField string) MatchAllQuery {
+ q.normsField = normsField
+ return q
+}
+
+func (q MatchAllQuery) Boost(boost float32) MatchAllQuery {
+ q.boost = &boost
+ return q
+}
+
+// Creates the query source for the match all query.
+func (q MatchAllQuery) Source() interface{} {
+ // {
+ // "match_all" : { ... }
+ // }
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["match_all"] = params
+ if q.boost != nil {
+ params["boost"] = q.boost
+ }
+ if q.normsField != "" {
+ params["norms_field"] = q.normsField
+ }
+ return source
+}
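+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source):
+//
+//   q := NewMatchAllQuery().Boost(1.2)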
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this.go
new file mode 100644
index 0000000..df12026
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this.go
@@ -0,0 +1,399 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "math"
+)
+
+// MoreLikeThisQuery finds documents that are "like" the provided text
+// by running it against one or more fields. For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/mlt-query/
+type MoreLikeThisQuery struct {
+ fields []string
+ likeText string
+ ids []string
+ docs []*MoreLikeThisQueryItem
+ include *bool
+ minimumShouldMatch string
+ minTermFreq *int
+ maxQueryTerms *int
+ stopWords []string
+ minDocFreq *int
+ maxDocFreq *int
+ minWordLen *int
+ maxWordLen *int
+ boostTerms *float64
+ boost *float64
+ analyzer string
+ failOnUnsupportedField *bool
+ queryName string
+}
+
+// NewMoreLikeThisQuery creates a new more-like-this query.
+func NewMoreLikeThisQuery(likeText string) MoreLikeThisQuery {
+ return MoreLikeThisQuery{
+ likeText: likeText,
+ fields: make([]string, 0),
+ ids: make([]string, 0),
+ docs: make([]*MoreLikeThisQueryItem, 0),
+ stopWords: make([]string, 0),
+ }
+}
+
+// Field adds one or more field names to the query.
+func (q MoreLikeThisQuery) Field(fields ...string) MoreLikeThisQuery {
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+// Fields adds one or more field names to the query.
+// Deprecated: Use Field for compatibility with elastic.v3.
+func (q MoreLikeThisQuery) Fields(fields ...string) MoreLikeThisQuery {
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+// StopWord sets the stopwords. Any word in this set is considered
+// "uninteresting" and ignored. Even if your Analyzer allows stopwords,
+// you might want to tell the MoreLikeThis code to ignore them, as for
+// the purposes of document similarity it seems reasonable to assume that
+// "a stop word is never interesting".
+func (q MoreLikeThisQuery) StopWord(stopWords ...string) MoreLikeThisQuery {
+ q.stopWords = append(q.stopWords, stopWords...)
+ return q
+}
+
+// StopWords is an alias for StopWord.
+// Deprecated: Use StopWord for compatibility with elastic.v3.
+func (q MoreLikeThisQuery) StopWords(stopWords ...string) MoreLikeThisQuery {
+ q.stopWords = append(q.stopWords, stopWords...)
+ return q
+}
+
+// LikeText sets the text to use in order to find documents that are "like" this.
+func (q MoreLikeThisQuery) LikeText(likeText string) MoreLikeThisQuery {
+ q.likeText = likeText
+ return q
+}
+
+// Docs sets the documents to use in order to find documents that are "like" this.
+func (q MoreLikeThisQuery) Docs(docs ...*MoreLikeThisQueryItem) MoreLikeThisQuery {
+ q.docs = append(q.docs, docs...)
+ return q
+}
+
+// Ids sets the document ids to use in order to find documents that are "like" this.
+func (q MoreLikeThisQuery) Ids(ids ...string) MoreLikeThisQuery {
+ q.ids = append(q.ids, ids...)
+ return q
+}
+
+// Include specifies whether the input documents should also be included
+// in the results returned. Defaults to false.
+func (q MoreLikeThisQuery) Include(include bool) MoreLikeThisQuery {
+ q.include = &include
+ return q
+}
+
+// PercentTermsToMatch is deprecated in favor of MinimumShouldMatch.
+// It converts the given fraction (e.g. 0.3) into an equivalent
+// minimum_should_match percentage (e.g. "30%").
+func (q MoreLikeThisQuery) PercentTermsToMatch(percentTermsToMatch float64) MoreLikeThisQuery {
+ q.minimumShouldMatch = fmt.Sprintf("%d%%", int(math.Floor(percentTermsToMatch*100)))
+ return q
+}
+
+// MinimumShouldMatch sets the number of terms that must match the generated
+// query expressed in the common syntax for minimum should match.
+// The default value is "30%".
+//
+// This used to be "PercentTermsToMatch".
+func (q MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) MoreLikeThisQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+// MinTermFreq is the frequency below which terms will be ignored in the
+// source doc. The default frequency is 2.
+func (q MoreLikeThisQuery) MinTermFreq(minTermFreq int) MoreLikeThisQuery {
+ q.minTermFreq = &minTermFreq
+ return q
+}
+
+// MaxQueryTerms sets the maximum number of query terms that will be included
+// in any generated query. It defaults to 25.
+func (q MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) MoreLikeThisQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+// MinDocFreq sets the minimum document frequency: words that do not occur
+// in at least this many docs will be ignored. The default is 5.
+func (q MoreLikeThisQuery) MinDocFreq(minDocFreq int) MoreLikeThisQuery {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+// MaxDocFreq sets the maximum document frequency: words that appear in
+// more than this many docs will be ignored. It defaults to unbounded.
+func (q MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) MoreLikeThisQuery {
+ q.maxDocFreq = &maxDocFreq
+ return q
+}
+
+// MinWordLen sets the minimum word length below which words will be
+// ignored. It defaults to 0.
+func (q MoreLikeThisQuery) MinWordLen(minWordLen int) MoreLikeThisQuery {
+ q.minWordLen = &minWordLen
+ return q
+}
+
+// MaxWordLen sets the maximum word length above which words will be ignored.
+// Defaults to unbounded (0).
+func (q MoreLikeThisQuery) MaxWordLen(maxWordLen int) MoreLikeThisQuery {
+ q.maxWordLen = &maxWordLen
+ return q
+}
+
+// BoostTerms sets the boost factor to use when boosting terms.
+// It defaults to 1.
+func (q MoreLikeThisQuery) BoostTerms(boostTerms float64) MoreLikeThisQuery {
+ q.boostTerms = &boostTerms
+ return q
+}
+
+// Analyzer specifies the analyzer that will be used to analyze the text.
+// Defaults to the analyzer associated with the field.
+func (q MoreLikeThisQuery) Analyzer(analyzer string) MoreLikeThisQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q MoreLikeThisQuery) Boost(boost float64) MoreLikeThisQuery {
+ q.boost = &boost
+ return q
+}
+
+// FailOnUnsupportedField indicates whether to fail or return no result
+// when this query is run against a field which is not supported such as
+// a binary/numeric field.
+func (q MoreLikeThisQuery) FailOnUnsupportedField(fail bool) MoreLikeThisQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+// QueryName sets the query name for the filter that can be used when
+// searching for matched_filters per hit.
+func (q MoreLikeThisQuery) QueryName(queryName string) MoreLikeThisQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the mlt query.
+func (q MoreLikeThisQuery) Source() interface{} {
+ // {
+ // "mlt" : { ... }
+ // }
+ params := make(map[string]interface{})
+ source := make(map[string]interface{})
+ source["mlt"] = params
+
+ if q.likeText == "" && len(q.docs) == 0 && len(q.ids) == 0 {
+ // We have no form of returning errors for invalid queries as of Elastic v2.
+ // We also don't have access to the client here, so we can't log anything.
+ // All we can do is to return an empty query, I suppose.
+ // TODO Is there a better approach here?
+ //return nil, errors.New(`more_like_this requires some documents to be "liked"`)
+ return source
+ }
+
+ if len(q.fields) > 0 {
+ params["fields"] = q.fields
+ }
+ if q.likeText != "" {
+ params["like_text"] = q.likeText
+ }
+ if q.minimumShouldMatch != "" {
+ params["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.minTermFreq != nil {
+ params["min_term_freq"] = *q.minTermFreq
+ }
+ if q.maxQueryTerms != nil {
+ params["max_query_terms"] = *q.maxQueryTerms
+ }
+ if len(q.stopWords) > 0 {
+ params["stop_words"] = q.stopWords
+ }
+ if q.minDocFreq != nil {
+ params["min_doc_freq"] = *q.minDocFreq
+ }
+ if q.maxDocFreq != nil {
+ params["max_doc_freq"] = *q.maxDocFreq
+ }
+ if q.minWordLen != nil {
+ params["min_word_len"] = *q.minWordLen
+ }
+ if q.maxWordLen != nil {
+ params["max_word_len"] = *q.maxWordLen
+ }
+ if q.boostTerms != nil {
+ params["boost_terms"] = *q.boostTerms
+ }
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.analyzer != "" {
+ params["analyzer"] = q.analyzer
+ }
+ if q.failOnUnsupportedField != nil {
+ params["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ if len(q.ids) > 0 {
+ params["ids"] = q.ids
+ }
+ if len(q.docs) > 0 {
+ docs := make([]interface{}, 0)
+ for _, doc := range q.docs {
+ docs = append(docs, doc.Source())
+ }
+ params["docs"] = docs
+ }
+ if q.include != nil {
+ params["exclude"] = !(*q.include) // ES 1.x only has exclude
+ }
+
+ return source
+}
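+
+// Example usage (an illustrative sketch added for this document, not part
+// of the upstream elastic.v2 source; the field names are assumptions):
+//
+//   q := NewMoreLikeThisQuery("Elasticsearch is a distributed search engine").
+//     Field("title", "body").
+//     MinTermFreq(1).
+//     MaxQueryTerms(12)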
+
+// -- MoreLikeThisQueryItem --
+
+// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery
+// to be "liked" or "unliked".
+type MoreLikeThisQueryItem struct {
+ likeText string
+
+ index string
+ typ string
+ id string
+ doc interface{}
+ fields []string
+ routing string
+ fsc *FetchSourceContext
+ version int64
+ versionType string
+}
+
+// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem.
+func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem {
+ return &MoreLikeThisQueryItem{
+ version: -1,
+ }
+}
+
+// LikeText represents a text to be "liked".
+func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem {
+ item.likeText = likeText
+ return item
+}
+
+// Index represents the index of the item.
+func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem {
+ item.index = index
+ return item
+}
+
+// Type represents the document type of the item.
+func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem {
+ item.typ = typ
+ return item
+}
+
+// Id represents the document id of the item.
+func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem {
+ item.id = id
+ return item
+}
+
+// Doc represents a raw document template for the item.
+func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem {
+ item.doc = doc
+ return item
+}
+
+// Fields represents the list of fields of the item.
+func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem {
+ item.fields = append(item.fields, fields...)
+ return item
+}
+
+// Routing sets the routing associated with the item.
+func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem {
+ item.routing = routing
+ return item
+}
+
+// FetchSourceContext represents the fetch source of the item which controls
+// if and how _source should be returned.
+func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem {
+ item.fsc = fsc
+ return item
+}
+
+// Version specifies the version of the item.
+func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem {
+ item.version = version
+ return item
+}
+
+// VersionType represents the version type of the item.
+func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem {
+ item.versionType = versionType
+ return item
+}
+
+// Source returns the JSON-serializable fragment of the entity.
+func (item *MoreLikeThisQueryItem) Source() interface{} {
+ if item.likeText != "" {
+ return item.likeText
+ }
+
+ source := make(map[string]interface{})
+
+ if item.index != "" {
+ source["_index"] = item.index
+ }
+ if item.typ != "" {
+ source["_type"] = item.typ
+ }
+ if item.id != "" {
+ source["_id"] = item.id
+ }
+ if item.doc != nil {
+ source["doc"] = item.doc
+ }
+ if len(item.fields) > 0 {
+ source["fields"] = item.fields
+ }
+ if item.routing != "" {
+ source["_routing"] = item.routing
+ }
+ if item.fsc != nil {
+ source["_source"] = item.fsc.Source()
+ }
+ if item.version >= 0 {
+ source["_version"] = item.version
+ }
+ if item.versionType != "" {
+ source["_version_type"] = item.versionType
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field.go
new file mode 100644
index 0000000..e3d723b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_more_like_this_field.go
@@ -0,0 +1,189 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The more_like_this_field query is the same as the more_like_this query,
+// except that it runs against a single field. It provides a nicer query DSL
+// than the generic more_like_this query and supports typed fields
+// (automatically wrapping typed fields with a type filter to match only
+// on the specific type).
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/mlt-field-query/
+type MoreLikeThisFieldQuery struct {
+ Query
+
+ name string
+ likeText string
+ percentTermsToMatch *float32
+ minTermFreq *int
+ maxQueryTerms *int
+ stopWords []string
+ minDocFreq *int
+ maxDocFreq *int
+ minWordLen *int
+ maxWordLen *int
+ boostTerms *float32
+ boost *float32
+ analyzer string
+ failOnUnsupportedField *bool
+}
+
+// NewMoreLikeThisFieldQuery creates a new mlt_field query.
+func NewMoreLikeThisFieldQuery(name, likeText string) MoreLikeThisFieldQuery {
+ q := MoreLikeThisFieldQuery{
+ name: name,
+ likeText: likeText,
+ stopWords: make([]string, 0),
+ }
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) Name(name string) MoreLikeThisFieldQuery {
+ q.name = name
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) StopWord(stopWord string) MoreLikeThisFieldQuery {
+ q.stopWords = append(q.stopWords, stopWord)
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) StopWords(stopWords ...string) MoreLikeThisFieldQuery {
+ q.stopWords = append(q.stopWords, stopWords...)
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) LikeText(likeText string) MoreLikeThisFieldQuery {
+ q.likeText = likeText
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) PercentTermsToMatch(percentTermsToMatch float32) MoreLikeThisFieldQuery {
+ q.percentTermsToMatch = &percentTermsToMatch
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MinTermFreq(minTermFreq int) MoreLikeThisFieldQuery {
+ q.minTermFreq = &minTermFreq
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MaxQueryTerms(maxQueryTerms int) MoreLikeThisFieldQuery {
+ q.maxQueryTerms = &maxQueryTerms
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MinDocFreq(minDocFreq int) MoreLikeThisFieldQuery {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MaxDocFreq(maxDocFreq int) MoreLikeThisFieldQuery {
+ q.maxDocFreq = &maxDocFreq
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MinWordLen(minWordLen int) MoreLikeThisFieldQuery {
+ q.minWordLen = &minWordLen
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) MaxWordLen(maxWordLen int) MoreLikeThisFieldQuery {
+ q.maxWordLen = &maxWordLen
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) BoostTerms(boostTerms float32) MoreLikeThisFieldQuery {
+ q.boostTerms = &boostTerms
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) Analyzer(analyzer string) MoreLikeThisFieldQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) Boost(boost float32) MoreLikeThisFieldQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q MoreLikeThisFieldQuery) FailOnUnsupportedField(fail bool) MoreLikeThisFieldQuery {
+ q.failOnUnsupportedField = &fail
+ return q
+}
+
+// Creates the query source for the mlt_field query.
+func (q MoreLikeThisFieldQuery) Source() interface{} {
+ // {
+ // "more_like_this_field" : {
+ // "name.first" : {
+ // "like_text" : "text like this one",
+ // "min_term_freq" : 1,
+ // "max_query_terms" : 12
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ params := make(map[string]interface{})
+ source["more_like_this_field"] = params
+
+ mlt := make(map[string]interface{})
+ params[q.name] = mlt
+
+ mlt["like_text"] = q.likeText
+
+ if q.percentTermsToMatch != nil {
+ mlt["percent_terms_to_match"] = *q.percentTermsToMatch
+ }
+
+ if q.minTermFreq != nil {
+ mlt["min_term_freq"] = *q.minTermFreq
+ }
+
+ if q.maxQueryTerms != nil {
+ mlt["max_query_terms"] = *q.maxQueryTerms
+ }
+
+ if len(q.stopWords) > 0 {
+ mlt["stop_words"] = q.stopWords
+ }
+
+ if q.minDocFreq != nil {
+ mlt["min_doc_freq"] = *q.minDocFreq
+ }
+
+ if q.maxDocFreq != nil {
+ mlt["max_doc_freq"] = *q.maxDocFreq
+ }
+
+ if q.minWordLen != nil {
+ mlt["min_word_len"] = *q.minWordLen
+ }
+
+ if q.maxWordLen != nil {
+ mlt["max_word_len"] = *q.maxWordLen
+ }
+
+ if q.boostTerms != nil {
+ mlt["boost_terms"] = *q.boostTerms
+ }
+
+ if q.boost != nil {
+ mlt["boost"] = *q.boost
+ }
+
+ if q.analyzer != "" {
+ mlt["analyzer"] = q.analyzer
+ }
+
+ if q.failOnUnsupportedField != nil {
+ mlt["fail_on_unsupported_field"] = *q.failOnUnsupportedField
+ }
+ return source
+}
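
For orientation, a minimal sketch of how this builder is typically driven, assuming the vendored gopkg.in/olivere/elastic.v2 import path; the field name and like-text are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	// Each setter returns a modified copy, so calls chain fluently.
	q := elastic.NewMoreLikeThisFieldQuery("name.first", "text like this one").
		MinTermFreq(1).
		MaxQueryTerms(12)

	// Source() yields the map that is serialized into the request body.
	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"more_like_this_field":{"name.first":{"like_text":"text like this one","max_query_terms":12,"min_term_freq":1}}}
}
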
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match.go
new file mode 100644
index 0000000..a52b853
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_multi_match.go
@@ -0,0 +1,253 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strings"
+)
+
+// The multi_match query builds further on top of the match query by allowing multiple fields to be specified.
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/multi-match-query.html
+type MultiMatchQuery struct {
+ Query
+ text interface{}
+ fields []string
+ fieldBoosts map[string]*float32
+ matchQueryType string // best_fields, most_fields, cross_fields, phrase, phrase_prefix
+ operator string // and / or
+ analyzer string
+ boost *float32
+ slop *int
+ fuzziness string
+ prefixLength *int
+ maxExpansions *int
+ minimumShouldMatch string
+ rewrite string
+ fuzzyRewrite string
+ useDisMax *bool
+ tieBreaker *float32
+ lenient *bool
+ cutoffFrequency *float32
+ zeroTermsQuery string
+ queryName string
+}
+
+func NewMultiMatchQuery(text interface{}, fields ...string) MultiMatchQuery {
+ q := MultiMatchQuery{
+ text: text,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float32),
+ }
+ q.fields = append(q.fields, fields...)
+ return q
+}
+
+func (q MultiMatchQuery) Field(field string) MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q MultiMatchQuery) FieldWithBoost(field string, boost float32) MultiMatchQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+// Type can be: "best_fields", "boolean", "most_fields", "cross_fields",
+// "phrase", or "phrase_prefix".
+func (q MultiMatchQuery) Type(matchQueryType string) MultiMatchQuery {
+ zero := float32(0.0)
+ one := float32(1.0)
+
+ switch strings.ToLower(matchQueryType) {
+ default: // best_fields / boolean
+ q.matchQueryType = "best_fields"
+ q.tieBreaker = &zero
+ case "most_fields":
+ q.matchQueryType = "most_fields"
+ q.tieBreaker = &one
+ case "cross_fields":
+ q.matchQueryType = "cross_fields"
+ q.tieBreaker = &zero
+ case "phrase":
+ q.matchQueryType = "phrase"
+ q.tieBreaker = &zero
+ case "phrase_prefix":
+ q.matchQueryType = "phrase_prefix"
+ q.tieBreaker = &zero
+ }
+ return q
+}
+
+func (q MultiMatchQuery) Operator(operator string) MultiMatchQuery {
+ q.operator = operator
+ return q
+}
+
+func (q MultiMatchQuery) Analyzer(analyzer string) MultiMatchQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q MultiMatchQuery) Boost(boost float32) MultiMatchQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q MultiMatchQuery) Slop(slop int) MultiMatchQuery {
+ q.slop = &slop
+ return q
+}
+
+func (q MultiMatchQuery) Fuzziness(fuzziness string) MultiMatchQuery {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q MultiMatchQuery) PrefixLength(prefixLength int) MultiMatchQuery {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q MultiMatchQuery) MaxExpansions(maxExpansions int) MultiMatchQuery {
+ q.maxExpansions = &maxExpansions
+ return q
+}
+
+func (q MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) MultiMatchQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q MultiMatchQuery) Rewrite(rewrite string) MultiMatchQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) MultiMatchQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+// Deprecated.
+func (q MultiMatchQuery) UseDisMax(useDisMax bool) MultiMatchQuery {
+ q.useDisMax = &useDisMax
+ return q
+}
+
+func (q MultiMatchQuery) TieBreaker(tieBreaker float32) MultiMatchQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+func (q MultiMatchQuery) Lenient(lenient bool) MultiMatchQuery {
+ q.lenient = &lenient
+ return q
+}
+
+func (q MultiMatchQuery) CutoffFrequency(cutoff float32) MultiMatchQuery {
+ q.cutoffFrequency = &cutoff
+ return q
+}
+
+// ZeroTermsQuery can be "all" or "none".
+func (q MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) MultiMatchQuery {
+ q.zeroTermsQuery = zeroTermsQuery
+ return q
+}
+
+func (q MultiMatchQuery) QueryName(queryName string) MultiMatchQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q MultiMatchQuery) Source() interface{} {
+ //
+ // {
+ // "multi_match" : {
+ // "query" : "this is a test",
+ // "fields" : [ "subject", "message" ]
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ multiMatch := make(map[string]interface{})
+ source["multi_match"] = multiMatch
+
+ multiMatch["query"] = q.text
+
+ if len(q.fields) > 0 {
+ fields := make([]string, 0)
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ multiMatch["fields"] = fields
+ }
+
+ if q.matchQueryType != "" {
+ multiMatch["type"] = q.matchQueryType
+ }
+
+ if q.operator != "" {
+ multiMatch["operator"] = q.operator
+ }
+ if q.analyzer != "" {
+ multiMatch["analyzer"] = q.analyzer
+ }
+ if q.boost != nil {
+ multiMatch["boost"] = *q.boost
+ }
+ if q.slop != nil {
+ multiMatch["slop"] = *q.slop
+ }
+ if q.fuzziness != "" {
+ multiMatch["fuzziness"] = q.fuzziness
+ }
+ if q.prefixLength != nil {
+ multiMatch["prefix_length"] = *q.prefixLength
+ }
+ if q.maxExpansions != nil {
+ multiMatch["max_expansions"] = *q.maxExpansions
+ }
+ if q.minimumShouldMatch != "" {
+ multiMatch["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.rewrite != "" {
+ multiMatch["rewrite"] = q.rewrite
+ }
+ if q.fuzzyRewrite != "" {
+ multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+ if q.useDisMax != nil {
+ multiMatch["use_dis_max"] = *q.useDisMax
+ }
+ if q.tieBreaker != nil {
+ multiMatch["tie_breaker"] = *q.tieBreaker
+ }
+ if q.lenient != nil {
+ multiMatch["lenient"] = *q.lenient
+ }
+ if q.cutoffFrequency != nil {
+ multiMatch["cutoff_frequency"] = *q.cutoffFrequency
+ }
+ if q.zeroTermsQuery != "" {
+ multiMatch["zero_terms_query"] = q.zeroTermsQuery
+ }
+ if q.queryName != "" {
+ multiMatch["_name"] = q.queryName
+ }
+ return source
+}
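
Two details of the serialization above are easy to miss: field boosts are rendered with fmt's %f verb, so a boost of 2 becomes "subject^2.000000", and Type() always pins a tie_breaker. A sketch with placeholder field names:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	// "subject" is boosted and will be serialized as "subject^2.000000".
	q := elastic.NewMultiMatchQuery("this is a test", "message").
		FieldWithBoost("subject", 2.0).
		Type("most_fields") // also sets tie_breaker to 1

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"multi_match":{"fields":["message","subject^2.000000"],"query":"this is a test","tie_breaker":1,"type":"most_fields"}}
}
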
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested.go
new file mode 100644
index 0000000..375be65
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_nested.go
@@ -0,0 +1,113 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A nested query allows querying nested objects/docs (see nested mapping).
+// The query is executed against the nested objects/docs as if they were
+// indexed as separate docs (internally, they are) and results in the
+// root parent doc (or parent nested mapping).
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/reference/query-dsl/nested-query/
+type NestedQuery struct {
+ query Query
+ filter Filter
+ path string
+ scoreMode string
+ boost *float32
+ queryName string
+ innerHit *InnerHit
+}
+
+// Creates a new nested query.
+func NewNestedQuery(path string) NestedQuery {
+ return NestedQuery{path: path}
+}
+
+func (q NestedQuery) Query(query Query) NestedQuery {
+ q.query = query
+ return q
+}
+
+func (q NestedQuery) Filter(filter Filter) NestedQuery {
+ q.filter = filter
+ return q
+}
+
+func (q NestedQuery) Path(path string) NestedQuery {
+ q.path = path
+ return q
+}
+
+func (q NestedQuery) ScoreMode(scoreMode string) NestedQuery {
+ q.scoreMode = scoreMode
+ return q
+}
+
+func (q NestedQuery) Boost(boost float32) NestedQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q NestedQuery) QueryName(queryName string) NestedQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q NestedQuery) InnerHit(innerHit *InnerHit) NestedQuery {
+ q.innerHit = innerHit
+ return q
+}
+
+// Creates the query source for the nested query.
+func (q NestedQuery) Source() interface{} {
+ // {
+ // "nested" : {
+ // "query" : {
+ // "bool" : {
+ // "must" : [
+ // {
+ // "match" : {"obj1.name" : "blue"}
+ // },
+ // {
+ // "range" : {"obj1.count" : {"gt" : 5}}
+ // }
+ // ]
+ // }
+ // },
+ // "filter" : {
+ // ...
+ // },
+ // "path" : "obj1",
+ // "score_mode" : "avg",
+ // "boost" : 1.0
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ nq := make(map[string]interface{})
+ query["nested"] = nq
+ if q.query != nil {
+ nq["query"] = q.query.Source()
+ }
+ if q.filter != nil {
+ nq["filter"] = q.filter.Source()
+ }
+ nq["path"] = q.path
+ if q.scoreMode != "" {
+ nq["score_mode"] = q.scoreMode
+ }
+ if q.boost != nil {
+ nq["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ nq["_name"] = q.queryName
+ }
+ if q.innerHit != nil {
+ nq["inner_hits"] = q.innerHit.Source()
+ }
+ return query
+}
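
A short sketch composing the nested query with the term query defined later in this diff; the path and field values follow the Source() comment above:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewNestedQuery("obj1").
		Query(elastic.NewTermQuery("obj1.name", "blue")).
		ScoreMode("avg")

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"nested":{"path":"obj1","query":{"term":{"obj1.name":"blue"}},"score_mode":"avg"}}
}
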
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix.go
new file mode 100644
index 0000000..02e95d2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_prefix.go
@@ -0,0 +1,75 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Matches documents that have fields containing terms
+// with a specified prefix (not analyzed).
+// For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/prefix-query.html
+type PrefixQuery struct {
+ Query
+ name string
+ prefix string
+ boost *float32
+ rewrite string
+ queryName string
+}
+
+// Creates a new prefix query.
+func NewPrefixQuery(name string, prefix string) PrefixQuery {
+ q := PrefixQuery{name: name, prefix: prefix}
+ return q
+}
+
+func (q PrefixQuery) Boost(boost float32) PrefixQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q PrefixQuery) Rewrite(rewrite string) PrefixQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q PrefixQuery) QueryName(queryName string) PrefixQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the prefix query.
+func (q PrefixQuery) Source() interface{} {
+ // {
+ // "prefix" : {
+ // "user" : {
+ // "prefix" : "ki",
+ // "boost" : 2.0
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["prefix"] = query
+
+ if q.boost == nil && q.rewrite == "" && q.queryName == "" {
+ query[q.name] = q.prefix
+ } else {
+ subQuery := make(map[string]interface{})
+ subQuery["prefix"] = q.prefix
+ if q.boost != nil {
+ subQuery["boost"] = *q.boost
+ }
+ if q.rewrite != "" {
+ subQuery["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ subQuery["_name"] = q.queryName
+ }
+ query[q.name] = subQuery
+ }
+
+ return source
+}
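
The branch at the end of Source() above means the query collapses to a compact single-value form unless a boost, rewrite, or query name is set. A sketch of both shapes, with placeholder values:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	plain := elastic.NewPrefixQuery("user", "ki")
	boosted := elastic.NewPrefixQuery("user", "ki").Boost(2.0)

	a, _ := json.Marshal(plain.Source())
	b, _ := json.Marshal(boosted.Source())
	fmt.Println(string(a)) // {"prefix":{"user":"ki"}}
	fmt.Println(string(b)) // {"prefix":{"user":{"boost":2,"prefix":"ki"}}}
}
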
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string.go
new file mode 100644
index 0000000..7afdf3c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_query_string.go
@@ -0,0 +1,281 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// A query that uses the query parser in order to parse
+// its content. For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/query-string-query.html
+type QueryStringQuery struct {
+ Query
+
+ queryString string
+ defaultField string
+ defaultOper string
+ analyzer string
+ quoteAnalyzer string
+ quoteFieldSuffix string
+ autoGeneratePhraseQueries *bool
+ allowLeadingWildcard *bool
+ lowercaseExpandedTerms *bool
+ enablePositionIncrements *bool
+ analyzeWildcard *bool
+ boost *float32
+ fuzzyMinSim *float32
+ fuzzyPrefixLength *int
+ fuzzyMaxExpansions *int
+ fuzzyRewrite string
+ phraseSlop *int
+ fields []string
+ fieldBoosts map[string]*float32
+ useDisMax *bool
+ tieBreaker *float32
+ rewrite string
+ minimumShouldMatch string
+ lenient *bool
+}
+
+// Creates a new query string query.
+func NewQueryStringQuery(queryString string) QueryStringQuery {
+ q := QueryStringQuery{
+ queryString: queryString,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float32),
+ }
+ return q
+}
+
+func (q QueryStringQuery) DefaultField(defaultField string) QueryStringQuery {
+ q.defaultField = defaultField
+ return q
+}
+
+func (q QueryStringQuery) Field(field string) QueryStringQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q QueryStringQuery) FieldWithBoost(field string, boost float32) QueryStringQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+func (q QueryStringQuery) UseDisMax(useDisMax bool) QueryStringQuery {
+ q.useDisMax = &useDisMax
+ return q
+}
+
+func (q QueryStringQuery) TieBreaker(tieBreaker float32) QueryStringQuery {
+ q.tieBreaker = &tieBreaker
+ return q
+}
+
+func (q QueryStringQuery) DefaultOperator(operator string) QueryStringQuery {
+ q.defaultOper = operator
+ return q
+}
+
+func (q QueryStringQuery) Analyzer(analyzer string) QueryStringQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) QueryStringQuery {
+ q.quoteAnalyzer = quoteAnalyzer
+ return q
+}
+
+func (q QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) QueryStringQuery {
+ q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries
+ return q
+}
+
+func (q QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) QueryStringQuery {
+ q.allowLeadingWildcard = &allowLeadingWildcard
+ return q
+}
+
+func (q QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) QueryStringQuery {
+ q.lowercaseExpandedTerms = &lowercaseExpandedTerms
+ return q
+}
+
+func (q QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) QueryStringQuery {
+ q.enablePositionIncrements = &enablePositionIncrements
+ return q
+}
+
+func (q QueryStringQuery) FuzzyMinSim(fuzzyMinSim float32) QueryStringQuery {
+ q.fuzzyMinSim = &fuzzyMinSim
+ return q
+}
+
+func (q QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) QueryStringQuery {
+ q.fuzzyMaxExpansions = &fuzzyMaxExpansions
+ return q
+}
+
+func (q QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) QueryStringQuery {
+ q.fuzzyRewrite = fuzzyRewrite
+ return q
+}
+
+func (q QueryStringQuery) PhraseSlop(phraseSlop int) QueryStringQuery {
+ q.phraseSlop = &phraseSlop
+ return q
+}
+
+func (q QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) QueryStringQuery {
+ q.analyzeWildcard = &analyzeWildcard
+ return q
+}
+
+func (q QueryStringQuery) Rewrite(rewrite string) QueryStringQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+func (q QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) QueryStringQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q QueryStringQuery) Boost(boost float32) QueryStringQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) QueryStringQuery {
+ q.quoteFieldSuffix = quoteFieldSuffix
+ return q
+}
+
+func (q QueryStringQuery) Lenient(lenient bool) QueryStringQuery {
+ q.lenient = &lenient
+ return q
+}
+
+// Creates the query source for the query string query.
+func (q QueryStringQuery) Source() interface{} {
+ // {
+ // "query_string" : {
+ // "default_field" : "content",
+ // "query" : "this AND that OR thus"
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["query_string"] = query
+
+ query["query"] = q.queryString
+
+ if q.defaultField != "" {
+ query["default_field"] = q.defaultField
+ }
+
+ if len(q.fields) > 0 {
+ fields := make([]string, 0)
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ query["fields"] = fields
+ }
+
+ if q.tieBreaker != nil {
+ query["tie_breaker"] = *q.tieBreaker
+ }
+
+ if q.useDisMax != nil {
+ query["use_dis_max"] = *q.useDisMax
+ }
+
+ if q.defaultOper != "" {
+ query["default_operator"] = q.defaultOper
+ }
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+
+ if q.quoteAnalyzer != "" {
+ query["quote_analyzer"] = q.quoteAnalyzer
+ }
+
+ if q.autoGeneratePhraseQueries != nil {
+ query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries
+ }
+
+ if q.allowLeadingWildcard != nil {
+ query["allow_leading_wildcard"] = *q.allowLeadingWildcard
+ }
+
+ if q.lowercaseExpandedTerms != nil {
+ query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms
+ }
+
+ if q.enablePositionIncrements != nil {
+ query["enable_position_increments"] = *q.enablePositionIncrements
+ }
+
+ if q.fuzzyMinSim != nil {
+ query["fuzzy_min_sim"] = *q.fuzzyMinSim
+ }
+
+ if q.boost != nil {
+ query["boost"] = *q.boost
+ }
+
+ if q.fuzzyPrefixLength != nil {
+ query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength
+ }
+
+ if q.fuzzyMaxExpansions != nil {
+ query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions
+ }
+
+ if q.fuzzyRewrite != "" {
+ query["fuzzy_rewrite"] = q.fuzzyRewrite
+ }
+
+ if q.phraseSlop != nil {
+ query["phrase_slop"] = *q.phraseSlop
+ }
+
+ if q.analyzeWildcard != nil {
+ query["analyze_wildcard"] = *q.analyzeWildcard
+ }
+
+ if q.rewrite != "" {
+ query["rewrite"] = q.rewrite
+ }
+
+ if q.minimumShouldMatch != "" {
+ query["minimum_should_match"] = q.minimumShouldMatch
+ }
+
+ if q.quoteFieldSuffix != "" {
+ query["quote_field_suffix"] = q.quoteFieldSuffix
+ }
+
+ if q.lenient != nil {
+ query["lenient"] = *q.lenient
+ }
+
+ return source
+}
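
A minimal sketch of the builder above, reusing the query text and default field from the Source() comment:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewQueryStringQuery("this AND that OR thus").
		DefaultField("content").
		AnalyzeWildcard(true)

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"query_string":{"analyze_wildcard":true,"default_field":"content","query":"this AND that OR thus"}}
}
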
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range.go
new file mode 100644
index 0000000..85121bc
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_range.go
@@ -0,0 +1,132 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Matches documents with fields that have terms within a certain range.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-range-query.html
+type RangeQuery struct {
+ Query
+ name string
+ from *interface{}
+ to *interface{}
+ timeZone string
+ format string
+ includeLower bool
+ includeUpper bool
+ boost *float64
+ queryName string
+}
+
+func NewRangeQuery(name string) RangeQuery {
+ q := RangeQuery{name: name, includeLower: true, includeUpper: true}
+ return q
+}
+
+// TimeZone allows for adjusting the from/to fields using a time zone.
+// Only valid for date fields.
+func (q RangeQuery) TimeZone(timeZone string) RangeQuery {
+ q.timeZone = timeZone
+ return q
+}
+
+// Format is a valid option for date fields in a Range query.
+func (q RangeQuery) Format(format string) RangeQuery {
+ q.format = format
+ return q
+}
+
+func (q RangeQuery) From(from interface{}) RangeQuery {
+ q.from = &from
+ return q
+}
+
+func (q RangeQuery) Gt(from interface{}) RangeQuery {
+ q.from = &from
+ q.includeLower = false
+ return q
+}
+
+func (q RangeQuery) Gte(from interface{}) RangeQuery {
+ q.from = &from
+ q.includeLower = true
+ return q
+}
+
+func (q RangeQuery) To(to interface{}) RangeQuery {
+ q.to = &to
+ return q
+}
+
+func (q RangeQuery) Lt(to interface{}) RangeQuery {
+ q.to = &to
+ q.includeUpper = false
+ return q
+}
+
+func (q RangeQuery) Lte(to interface{}) RangeQuery {
+ q.to = &to
+ q.includeUpper = true
+ return q
+}
+
+func (q RangeQuery) IncludeLower(includeLower bool) RangeQuery {
+ q.includeLower = includeLower
+ return q
+}
+
+func (q RangeQuery) IncludeUpper(includeUpper bool) RangeQuery {
+ q.includeUpper = includeUpper
+ return q
+}
+
+func (q RangeQuery) Boost(boost float64) RangeQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q RangeQuery) QueryName(queryName string) RangeQuery {
+ q.queryName = queryName
+ return q
+}
+
+func (q RangeQuery) Source() interface{} {
+ // {
+ // "range" : {
+ // "name" : {
+ // "..." : "..."
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ rangeQ := make(map[string]interface{})
+ source["range"] = rangeQ
+
+ params := make(map[string]interface{})
+ rangeQ[q.name] = params
+
+ params["from"] = q.from
+ params["to"] = q.to
+ if q.timeZone != "" {
+ params["time_zone"] = q.timeZone
+ }
+ if q.format != "" {
+ params["format"] = q.format
+ }
+ params["include_lower"] = q.includeLower
+ params["include_upper"] = q.includeUpper
+
+ if q.boost != nil {
+ rangeQ["boost"] = *q.boost
+ }
+
+ if q.queryName != "" {
+ rangeQ["_name"] = q.queryName
+ }
+
+ return source
+}
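
One subtlety of Source() above: from and to are always emitted, so an unbounded side serializes as null. A sketch with one closed and one open bound (the field name is a placeholder):

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	// Gte sets the lower bound inclusively; Lt sets the upper bound exclusively.
	q := elastic.NewRangeQuery("age").Gte(10).Lt(20)

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"range":{"age":{"from":10,"include_lower":true,"include_upper":false,"to":20}}}
}
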
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp.go
new file mode 100644
index 0000000..9d3bb5a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_regexp.go
@@ -0,0 +1,89 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// RegexpQuery allows you to use regular expression term queries.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html.
+type RegexpQuery struct {
+ Query
+ name string
+ regexp string
+ flags *string
+ boost *float64
+ rewrite *string
+ queryName *string
+ maxDeterminizedStates *int
+}
+
+// NewRegexpQuery creates a new regexp query.
+func NewRegexpQuery(name string, regexp string) RegexpQuery {
+ return RegexpQuery{name: name, regexp: regexp}
+}
+
+// Flags sets the regexp flags.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-regexp-query.html#_optional_operators
+// for details.
+func (q RegexpQuery) Flags(flags string) RegexpQuery {
+ q.flags = &flags
+ return q
+}
+
+func (q RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) RegexpQuery {
+ q.maxDeterminizedStates = &maxDeterminizedStates
+ return q
+}
+
+func (q RegexpQuery) Boost(boost float64) RegexpQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q RegexpQuery) Rewrite(rewrite string) RegexpQuery {
+ q.rewrite = &rewrite
+ return q
+}
+
+func (q RegexpQuery) QueryName(queryName string) RegexpQuery {
+ q.queryName = &queryName
+ return q
+}
+
+// Source returns the JSON-serializable query data.
+func (q RegexpQuery) Source() interface{} {
+ // {
+ // "regexp" : {
+ // "name.first" : {
+ // "value" : "s.*y",
+ // "boost" : 1.2
+ // }
+ // }
+ // }
+
+ source := make(map[string]interface{})
+ query := make(map[string]interface{})
+ source["regexp"] = query
+
+ x := make(map[string]interface{})
+ x["value"] = q.regexp
+ if q.flags != nil {
+ x["flags"] = *q.flags
+ }
+ if q.maxDeterminizedStates != nil {
+ x["max_determinized_states"] = *q.maxDeterminizedStates
+ }
+ if q.boost != nil {
+ x["boost"] = *q.boost
+ }
+ if q.rewrite != nil {
+ x["rewrite"] = *q.rewrite
+ }
+ if q.queryName != nil {
+ x["name"] = *q.queryName
+ }
+ query[q.name] = x
+
+ return source
+}
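
A short usage sketch, mirroring the value and boost from the Source() comment above; the flags string is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewRegexpQuery("name.first", "s.*y").
		Flags("INTERSECTION|COMPLEMENT|EMPTY").
		Boost(1.2)

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"regexp":{"name.first":{"boost":1.2,"flags":"INTERSECTION|COMPLEMENT|EMPTY","value":"s.*y"}}}
}
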
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string.go
new file mode 100644
index 0000000..3e82e6a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_simple_query_string.go
@@ -0,0 +1,100 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+ "strings"
+)
+
+// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
+// to parse its content. Unlike the regular query_string query,
+// the simple_query_string query will never throw an exception,
+// and discards invalid parts of the query.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html
+type SimpleQueryStringQuery struct {
+ queryText string
+ analyzer string
+ operator string
+ fields []string
+ fieldBoosts map[string]*float32
+}
+
+// Creates a new simple query string query.
+func NewSimpleQueryStringQuery(text string) SimpleQueryStringQuery {
+ q := SimpleQueryStringQuery{
+ queryText: text,
+ fields: make([]string, 0),
+ fieldBoosts: make(map[string]*float32),
+ }
+ return q
+}
+
+func (q SimpleQueryStringQuery) Field(field string) SimpleQueryStringQuery {
+ q.fields = append(q.fields, field)
+ return q
+}
+
+func (q SimpleQueryStringQuery) FieldWithBoost(field string, boost float32) SimpleQueryStringQuery {
+ q.fields = append(q.fields, field)
+ q.fieldBoosts[field] = &boost
+ return q
+}
+
+func (q SimpleQueryStringQuery) Analyzer(analyzer string) SimpleQueryStringQuery {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q SimpleQueryStringQuery) DefaultOperator(defaultOperator string) SimpleQueryStringQuery {
+ q.operator = defaultOperator
+ return q
+}
+
+// Creates the query source for the simple query string query.
+func (q SimpleQueryStringQuery) Source() interface{} {
+ // {
+ // "simple_query_string" : {
+ // "query" : "\"fried eggs\" +(eggplant | potato) -frittata",
+ // "analyzer" : "snowball",
+ // "fields" : ["body^5","_all"],
+ // "default_operator" : "and"
+ // }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["simple_query_string"] = query
+
+ query["query"] = q.queryText
+
+ if len(q.fields) > 0 {
+ fields := make([]string, 0)
+ for _, field := range q.fields {
+ if boost, found := q.fieldBoosts[field]; found {
+ if boost != nil {
+ fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
+ } else {
+ fields = append(fields, field)
+ }
+ } else {
+ fields = append(fields, field)
+ }
+ }
+ query["fields"] = fields
+ }
+
+ if q.analyzer != "" {
+ query["analyzer"] = q.analyzer
+ }
+
+ if q.operator != "" {
+ query["default_operator"] = strings.ToLower(q.operator)
+ }
+
+ return source
+}
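
Note that DefaultOperator is lower-cased on serialization. A sketch reusing the query from the Source() comment above:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`).
		Field("body").
		Field("_all").
		DefaultOperator("AND") // emitted as "and"

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"simple_query_string":{"default_operator":"and","fields":["body","_all"],"query":"\"fried eggs\" +(eggplant | potato) -frittata"}}
}
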
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query.go
new file mode 100644
index 0000000..184d424
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_template_query.go
@@ -0,0 +1,84 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TemplateQuery is a query that accepts a query template and a
+// map of key/value pairs to fill in template parameters.
+//
+// For more details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+type TemplateQuery struct {
+ vars map[string]interface{}
+ template string
+ templateType string
+}
+
+// NewTemplateQuery creates a new TemplateQuery.
+func NewTemplateQuery(name string) TemplateQuery {
+ return TemplateQuery{
+ template: name,
+ vars: make(map[string]interface{}),
+ }
+}
+
+// Template specifies the name of the template.
+func (q TemplateQuery) Template(name string) TemplateQuery {
+ q.template = name
+ return q
+}
+
+// TemplateType defines which kind of query we use. The values can be:
+// inline, indexed, or file. If undefined, inline is used.
+func (q TemplateQuery) TemplateType(typ string) TemplateQuery {
+ q.templateType = typ
+ return q
+}
+
+// Var sets a single parameter pair.
+func (q TemplateQuery) Var(name string, value interface{}) TemplateQuery {
+ q.vars[name] = value
+ return q
+}
+
+// Vars sets parameters for the template query.
+func (q TemplateQuery) Vars(vars map[string]interface{}) TemplateQuery {
+ q.vars = vars
+ return q
+}
+
+// Source returns the JSON serializable content for the search.
+func (q TemplateQuery) Source() interface{} {
+ // {
+ // "template" : {
+ // "query" : {"match_{{template}}": {}},
+ // "params" : {
+ // "template": "all"
+ // }
+ // }
+ // }
+
+ query := make(map[string]interface{})
+
+ tmpl := make(map[string]interface{})
+ query["template"] = tmpl
+
+ // TODO(oe): Implementation differs from online documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-template-query.html
+ var fieldname string
+ switch q.templateType {
+ case "file": // file
+ fieldname = "file"
+ case "indexed", "id": // indexed
+ fieldname = "id"
+ default: // inline
+ fieldname = "query"
+ }
+
+ tmpl[fieldname] = q.template
+ if len(q.vars) > 0 {
+ tmpl["params"] = q.vars
+ }
+
+ return query
+}
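
Per the switch in Source() above, TemplateType decides whether the template name is emitted under "query" (inline, the default), "id" (indexed), or "file". A sketch with a placeholder template name:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	// An indexed template, referenced by name.
	q := elastic.NewTemplateQuery("all_tweets").
		TemplateType("id").
		Var("template", "all")

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"template":{"id":"all_tweets","params":{"template":"all"}}}
}
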
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term.go
new file mode 100644
index 0000000..7b8b518
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_term.go
@@ -0,0 +1,55 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A term query matches documents that contain
+// a term (not analyzed). For more details, see
+// http://www.elasticsearch.org/guide/reference/query-dsl/term-query.html
+type TermQuery struct {
+ Query
+ name string
+ value interface{}
+ boost *float32
+ queryName string
+}
+
+// Creates a new term query.
+func NewTermQuery(name string, value interface{}) TermQuery {
+ t := TermQuery{name: name, value: value}
+ return t
+}
+
+func (q TermQuery) Boost(boost float32) TermQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q TermQuery) QueryName(queryName string) TermQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the term query.
+func (q TermQuery) Source() interface{} {
+ // {"term":{"name":"value"}}
+ source := make(map[string]interface{})
+ tq := make(map[string]interface{})
+ source["term"] = tq
+
+ if q.boost == nil && q.queryName == "" {
+ tq[q.name] = q.value
+ } else {
+ subQ := make(map[string]interface{})
+ subQ["value"] = q.value
+ if q.boost != nil {
+ subQ["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ subQ["_name"] = q.queryName
+ }
+ tq[q.name] = subQ
+ }
+ return source
+}
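
As with the prefix query earlier in this diff, Source() emits the compact form unless a boost or query name forces the expanded object. A sketch of both shapes, with placeholder values:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	plain := elastic.NewTermQuery("user", "kimchy")
	named := elastic.NewTermQuery("user", "kimchy").Boost(2.0).QueryName("tq")

	a, _ := json.Marshal(plain.Source())
	b, _ := json.Marshal(named.Source())
	fmt.Println(string(a)) // {"term":{"user":"kimchy"}}
	fmt.Println(string(b)) // {"term":{"user":{"_name":"tq","boost":2,"value":"kimchy"}}}
}
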
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms.go
new file mode 100644
index 0000000..40a8ed9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_terms.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A query that matches any (configurable number) of the provided terms.
+// This is a simpler syntax query for using a bool query with
+// several term queries in the should clauses.
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html
+type TermsQuery struct {
+ Query
+ name string
+ values []interface{}
+ minimumShouldMatch string
+ disableCoord *bool
+ boost *float32
+ queryName string
+}
+
+// NewTermsQuery creates a new terms query.
+func NewTermsQuery(name string, values ...interface{}) TermsQuery {
+ t := TermsQuery{
+ name: name,
+ values: make([]interface{}, 0),
+ }
+ if len(values) > 0 {
+ t.values = append(t.values, values...)
+ }
+ return t
+}
+
+func (q TermsQuery) MinimumShouldMatch(minimumShouldMatch string) TermsQuery {
+ q.minimumShouldMatch = minimumShouldMatch
+ return q
+}
+
+func (q TermsQuery) DisableCoord(disableCoord bool) TermsQuery {
+ q.disableCoord = &disableCoord
+ return q
+}
+
+func (q TermsQuery) Boost(boost float32) TermsQuery {
+ q.boost = &boost
+ return q
+}
+
+func (q TermsQuery) QueryName(queryName string) TermsQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Creates the query source for the terms query.
+func (q TermsQuery) Source() interface{} {
+ // {"terms":{"name":["value1","value2"]}}
+ source := make(map[string]interface{})
+ params := make(map[string]interface{})
+ source["terms"] = params
+ params[q.name] = q.values
+ if q.minimumShouldMatch != "" {
+ params["minimum_should_match"] = q.minimumShouldMatch
+ }
+ if q.disableCoord != nil {
+ params["disable_coord"] = *q.disableCoord
+ }
+ if q.boost != nil {
+ params["boost"] = *q.boost
+ }
+ if q.queryName != "" {
+ params["_name"] = q.queryName
+ }
+ return source
+}
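
A minimal sketch of the terms query with placeholder field and values:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewTermsQuery("tags", "blue", "pill").
		MinimumShouldMatch("1")

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"terms":{"minimum_should_match":"1","tags":["blue","pill"]}}
}
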
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard.go
new file mode 100644
index 0000000..5a25e24
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_queries_wildcard.go
@@ -0,0 +1,100 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// WildcardQuery matches documents that have fields matching a wildcard
+// expression (not analyzed). Supported wildcards are *, which matches
+// any character sequence (including the empty one), and ?, which matches
+// any single character. Note this query can be slow, as it needs to iterate
+// over many terms. In order to prevent extremely slow wildcard queries,
+// a wildcard term should not start with one of the wildcards * or ?.
+// The wildcard query maps to Lucene WildcardQuery.
+//
+// For more details, see
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html.
+type WildcardQuery struct {
+ Query
+
+ name string
+ wildcard string
+ boost float32
+ rewrite string
+ queryName string
+}
+
+// NewWildcardQuery creates a new wildcard query.
+func NewWildcardQuery(name, wildcard string) WildcardQuery {
+ q := WildcardQuery{
+ name: name,
+ wildcard: wildcard,
+ boost: -1.0,
+ }
+ return q
+}
+
+// Name sets the name of the field to query.
+func (q WildcardQuery) Name(name string) WildcardQuery {
+ q.name = name
+ return q
+}
+
+// Wildcard is the wildcard to be used in the query, e.g. ki*y??.
+func (q WildcardQuery) Wildcard(wildcard string) WildcardQuery {
+ q.wildcard = wildcard
+ return q
+}
+
+// Boost sets the boost for this query.
+func (q WildcardQuery) Boost(boost float32) WildcardQuery {
+ q.boost = boost
+ return q
+}
+
+// Rewrite controls the rewriting.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-multi-term-rewrite.html
+// for details.
+func (q WildcardQuery) Rewrite(rewrite string) WildcardQuery {
+ q.rewrite = rewrite
+ return q
+}
+
+// QueryName sets the name of this query.
+func (q WildcardQuery) QueryName(queryName string) WildcardQuery {
+ q.queryName = queryName
+ return q
+}
+
+// Source returns the JSON serializable body of this query.
+func (q WildcardQuery) Source() interface{} {
+ // {
+ // "wildcard" : {
+ // "user" : {
+ // "wildcard" : "ki*y",
+ // "boost" : 1.0
+ // }
+ //   }
+ // }
+
+ source := make(map[string]interface{})
+
+ query := make(map[string]interface{})
+ source["wildcard"] = query
+
+ wq := make(map[string]interface{})
+ query[q.name] = wq
+
+ wq["wildcard"] = q.wildcard
+
+ if q.boost != -1.0 {
+ wq["boost"] = q.boost
+ }
+ if q.rewrite != "" {
+ wq["rewrite"] = q.rewrite
+ }
+ if q.queryName != "" {
+ wq["_name"] = q.queryName
+ }
+
+ return source
+}
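
Unlike most builders in this diff, WildcardQuery stores its boost as a plain float32 with -1.0 as the "unset" sentinel rather than a pointer, so a boost only appears in the output once Boost() has been called. A sketch mirroring the Source() comment:

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	q := elastic.NewWildcardQuery("user", "ki*y").Boost(1.2)

	body, _ := json.Marshal(q.Source())
	fmt.Println(string(body))
	// {"wildcard":{"user":{"boost":1.2,"wildcard":"ki*y"}}}
}
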
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request.go
new file mode 100644
index 0000000..2fc0311
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_request.go
@@ -0,0 +1,158 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "strings"
+)
+
+// SearchRequest combines a search request and its
+// query details (see SearchSource).
+// It is used in combination with MultiSearch.
+type SearchRequest struct {
+ searchType string // default in ES is "query_then_fetch"
+ indices []string
+ types []string
+ routing *string
+ preference *string
+ source interface{}
+}
+
+// NewSearchRequest creates a new search request.
+func NewSearchRequest() *SearchRequest {
+ return &SearchRequest{
+ indices: make([]string, 0),
+ types: make([]string, 0),
+ }
+}
+
+// SearchType must be one of "query_then_fetch", "query_and_fetch",
+// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch".
+// Alternatively, use one of the SearchType* convenience methods below.
+func (r *SearchRequest) SearchType(searchType string) *SearchRequest {
+ r.searchType = searchType
+ return r
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest {
+ return r.SearchType("dfs_query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest {
+ return r.SearchType("dfs_query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest {
+ return r.SearchType("query_then_fetch")
+}
+
+func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest {
+ return r.SearchType("query_and_fetch")
+}
+
+func (r *SearchRequest) SearchTypeScan() *SearchRequest {
+ return r.SearchType("scan")
+}
+
+func (r *SearchRequest) SearchTypeCount() *SearchRequest {
+ return r.SearchType("count")
+}
+
+func (r *SearchRequest) Index(index string) *SearchRequest {
+ r.indices = append(r.indices, index)
+ return r
+}
+
+func (r *SearchRequest) Indices(indices ...string) *SearchRequest {
+ r.indices = append(r.indices, indices...)
+ return r
+}
+
+func (r *SearchRequest) HasIndices() bool {
+ return len(r.indices) > 0
+}
+
+func (r *SearchRequest) Type(typ string) *SearchRequest {
+ r.types = append(r.types, typ)
+ return r
+}
+
+func (r *SearchRequest) Types(types ...string) *SearchRequest {
+ r.types = append(r.types, types...)
+ return r
+}
+
+func (r *SearchRequest) Routing(routing string) *SearchRequest {
+ r.routing = &routing
+ return r
+}
+
+func (r *SearchRequest) Routings(routings ...string) *SearchRequest {
+ if routings != nil {
+ routings := strings.Join(routings, ",")
+ r.routing = &routings
+ } else {
+ r.routing = nil
+ }
+ return r
+}
+
+func (r *SearchRequest) Preference(preference string) *SearchRequest {
+ r.preference = &preference
+ return r
+}
+
+func (r *SearchRequest) Source(source interface{}) *SearchRequest {
+ switch v := source.(type) {
+ case *SearchSource:
+ r.source = v.Source()
+ default:
+ r.source = source
+ }
+ return r
+}
+
+// header is used by MultiSearch to get information about the search header
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) header() interface{} {
+ h := make(map[string]interface{})
+ if r.searchType != "" {
+ h["search_type"] = r.searchType
+ }
+
+ switch len(r.indices) {
+ case 0:
+ case 1:
+ h["index"] = r.indices[0]
+ default:
+ h["indices"] = r.indices
+ }
+
+ switch len(r.types) {
+ case 0:
+ case 1:
+ h["type"] = r.types[0]
+ default:
+ h["types"] = r.types
+ }
+
+ if r.routing != nil && *r.routing != "" {
+ h["routing"] = *r.routing
+ }
+
+ if r.preference != nil && *r.preference != "" {
+ h["preference"] = *r.preference
+ }
+
+ return h
+}
+
+// body is used by MultiSearch to get information about the search body
+// of one SearchRequest.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-multi-search.html
+func (r *SearchRequest) body() interface{} {
+ return r.source
+}
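
header() and body() are unexported and consumed by the MultiSearch service elsewhere in this package; callers only use the exported builder. A sketch assembling one request, reusing the SearchSource defined in the next file (index and type names are placeholders):

package main

import (
	"fmt"

	"gopkg.in/olivere/elastic.v2"
)

func main() {
	src := elastic.NewSearchSource().
		Query(elastic.NewTermQuery("user", "olivere")).
		Size(5)

	req := elastic.NewSearchRequest().
		Index("twitter").
		Type("tweet").
		SearchTypeDfsQueryThenFetch().
		Source(src)

	// A MultiSearch call would pull the header and body from req;
	// here we only confirm the request carries an index.
	fmt.Println(req.HasIndices()) // true
}
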
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source.go
new file mode 100644
index 0000000..4429540
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/search_source.go
@@ -0,0 +1,550 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by an MIT license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "fmt"
+)
+
+// SearchSource enables users to build the search source.
+// It resembles the SearchSourceBuilder in Elasticsearch.
+type SearchSource struct {
+ query Query
+ postFilter Filter
+ from int
+ size int
+ explain *bool
+ version *bool
+ sorts []SortInfo
+ sorters []Sorter
+ trackScores bool
+ minScore *float64
+ timeout string
+ fieldNames []string
+ fieldDataFields []string
+ scriptFields []*ScriptField
+ partialFields []*PartialField
+ fetchSourceContext *FetchSourceContext
+ facets map[string]Facet
+ aggregations map[string]Aggregation
+ highlight *Highlight
+ globalSuggestText string
+ suggesters []Suggester
+ rescores []*Rescore
+ defaultRescoreWindowSize *int
+ indexBoosts map[string]float64
+ stats []string
+ innerHits map[string]*InnerHit
+}
+
+// NewSearchSource initializes a new SearchSource.
+func NewSearchSource() *SearchSource {
+ return &SearchSource{
+ from: -1,
+ size: -1,
+ trackScores: false,
+ sorts: make([]SortInfo, 0),
+ sorters: make([]Sorter, 0),
+ fieldDataFields: make([]string, 0),
+ scriptFields: make([]*ScriptField, 0),
+ partialFields: make([]*PartialField, 0),
+ facets: make(map[string]Facet),
+ aggregations: make(map[string]Aggregation),
+ rescores: make([]*Rescore, 0),
+ indexBoosts: make(map[string]float64),
+ stats: make([]string, 0),
+ innerHits: make(map[string]*InnerHit),
+ }
+}
+
+// Query sets the query to use with this search source.
+func (s *SearchSource) Query(query Query) *SearchSource {
+ s.query = query
+ return s
+}
+
+// PostFilter will be executed after the query has been executed and
+// only affects the search hits, not the aggregations.
+// This filter is always executed as the last filtering mechanism.
+func (s *SearchSource) PostFilter(postFilter Filter) *SearchSource {
+ s.postFilter = postFilter
+ return s
+}
+
+// From sets the offset to start the search from. Defaults to 0.
+func (s *SearchSource) From(from int) *SearchSource {
+ s.from = from
+ return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchSource) Size(size int) *SearchSource {
+ s.size = size
+ return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchSource) MinScore(minScore float64) *SearchSource {
+ s.minScore = &minScore
+ return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchSource) Explain(explain bool) *SearchSource {
+ s.explain = &explain
+ return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchSource) Version(version bool) *SearchSource {
+ s.version = &version
+ return s
+}
+
+// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms".
+func (s *SearchSource) Timeout(timeout string) *SearchSource {
+ s.timeout = timeout
+ return s
+}
+
+// TimeoutInMillis controls how many milliseconds a search is allowed
+// to take before it is canceled.
+func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource {
+ s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
+ return s
+}
+
+// Sort adds a sort order.
+func (s *SearchSource) Sort(field string, ascending bool) *SearchSource {
+ s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending})
+ return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource {
+ s.sorts = append(s.sorts, info)
+ return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource {
+ s.sorters = append(s.sorters, sorter...)
+ return s
+}
+
+func (s *SearchSource) hasSort() bool {
+ return len(s.sorts) > 0 || len(s.sorters) > 0
+}
+
+// TrackScores is applied when sorting and controls if scores will be
+// tracked as well. Defaults to false.
+func (s *SearchSource) TrackScores(trackScores bool) *SearchSource {
+ s.trackScores = trackScores
+ return s
+}
+
+// Facet adds a facet to perform as part of the search.
+func (s *SearchSource) Facet(name string, facet Facet) *SearchSource {
+ s.facets[name] = facet
+ return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource {
+ s.aggregations[name] = aggregation
+ return s
+}
+
+// DefaultRescoreWindowSize sets the rescore window size for rescores
+// that don't specify their window.
+func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource {
+ s.defaultRescoreWindowSize = &defaultRescoreWindowSize
+ return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource {
+ s.highlight = highlight
+ return s
+}
+
+// Highlighter returns the highlighter.
+func (s *SearchSource) Highlighter() *Highlight {
+ if s.highlight == nil {
+ s.highlight = NewHighlight()
+ }
+ return s.highlight
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchSource) GlobalSuggestText(text string) *SearchSource {
+ s.globalSuggestText = text
+ return s
+}
+
+// Suggester adds a suggester to the search.
+func (s *SearchSource) Suggester(suggester Suggester) *SearchSource {
+ s.suggesters = append(s.suggesters, suggester)
+ return s
+}
+
+// AddRescore adds a rescorer to the search.
+func (s *SearchSource) AddRescore(rescore *Rescore) *SearchSource {
+ s.rescores = append(s.rescores, rescore)
+ return s
+}
+
+// ClearRescores removes all rescorers from the search.
+func (s *SearchSource) ClearRescores() *SearchSource {
+ s.rescores = make([]*Rescore, 0)
+ return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource {
+ if s.fetchSourceContext == nil {
+ s.fetchSourceContext = NewFetchSourceContext(fetchSource)
+ } else {
+ s.fetchSourceContext.SetFetchSource(fetchSource)
+ }
+ return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource {
+ s.fetchSourceContext = fetchSourceContext
+ return s
+}
+
+// Fields sets the fields to load and return as part of the search request.
+// If none are specified, the source of the document will be returned.
+func (s *SearchSource) Fields(fieldNames ...string) *SearchSource {
+ if s.fieldNames == nil {
+ s.fieldNames = make([]string, 0)
+ }
+ s.fieldNames = append(s.fieldNames, fieldNames...)
+ return s
+}
+
+// Field adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
+func (s *SearchSource) Field(fieldName string) *SearchSource {
+ if s.fieldNames == nil {
+ s.fieldNames = make([]string, 0)
+ }
+ s.fieldNames = append(s.fieldNames, fieldName)
+ return s
+}
+
+// NoFields indicates that no fields should be loaded, resulting in only
+// the id and type being returned per hit.
+func (s *SearchSource) NoFields() *SearchSource {
+ s.fieldNames = make([]string, 0)
+ return s
+}
+
+// FieldDataFields adds one or more fields to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataFields(fieldDataFields ...string) *SearchSource {
+ s.fieldDataFields = append(s.fieldDataFields, fieldDataFields...)
+ return s
+}
+
+// FieldDataField adds a single field to load from the field data cache
+// and return as part of the search request.
+func (s *SearchSource) FieldDataField(fieldDataField string) *SearchSource {
+ s.fieldDataFields = append(s.fieldDataFields, fieldDataField)
+ return s
+}
+
+// ScriptFields adds one or more script fields with the provided scripts.
+func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource {
+ s.scriptFields = append(s.scriptFields, scriptFields...)
+ return s
+}
+
+// ScriptField adds a single script field with the provided script.
+func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource {
+ s.scriptFields = append(s.scriptFields, scriptField)
+ return s
+}
+
+// PartialFields adds partial fields.
+func (s *SearchSource) PartialFields(partialFields ...*PartialField) *SearchSource {
+ s.partialFields = append(s.partialFields, partialFields...)
+ return s
+}
+
+// PartialField adds a partial field.
+func (s *SearchSource) PartialField(partialField *PartialField) *SearchSource {
+ s.partialFields = append(s.partialFields, partialField)
+ return s
+}
+
+// IndexBoost sets the boost that a specific index will receive when the
+// query is executed against it.
+func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource {
+ s.indexBoosts[index] = boost
+ return s
+}
+
+// Stats group this request will be aggregated under.
+func (s *SearchSource) Stats(statsGroup ...string) *SearchSource {
+ s.stats = append(s.stats, statsGroup...)
+ return s
+}
+
+// InnerHit adds an inner hit to return with the result.
+func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource {
+ s.innerHits[name] = innerHit
+ return s
+}
+
+// Source returns the serializable JSON for the source builder.
+func (s *SearchSource) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if s.from != -1 {
+ source["from"] = s.from
+ }
+ if s.size != -1 {
+ source["size"] = s.size
+ }
+ if s.timeout != "" {
+ source["timeout"] = s.timeout
+ }
+ if s.query != nil {
+ source["query"] = s.query.Source()
+ }
+ if s.postFilter != nil {
+ source["post_filter"] = s.postFilter.Source()
+ }
+ if s.minScore != nil {
+ source["min_score"] = *s.minScore
+ }
+ if s.version != nil {
+ source["version"] = *s.version
+ }
+ if s.explain != nil {
+ source["explain"] = *s.explain
+ }
+ if s.fetchSourceContext != nil {
+ source["_source"] = s.fetchSourceContext.Source()
+ }
+
+ if s.fieldNames != nil {
+ switch len(s.fieldNames) {
+ case 1:
+ source["fields"] = s.fieldNames[0]
+ default:
+ source["fields"] = s.fieldNames
+ }
+ }
+
+ if len(s.fieldDataFields) > 0 {
+ source["fielddata_fields"] = s.fieldDataFields
+ }
+
+ if len(s.partialFields) > 0 {
+ pfmap := make(map[string]interface{})
+ for _, partialField := range s.partialFields {
+ pfmap[partialField.Name] = partialField.Source()
+ }
+ source["partial_fields"] = pfmap
+ }
+
+ if len(s.scriptFields) > 0 {
+ sfmap := make(map[string]interface{})
+ for _, scriptField := range s.scriptFields {
+ sfmap[scriptField.FieldName] = scriptField.Source()
+ }
+ source["script_fields"] = sfmap
+ }
+
+ if len(s.sorters) > 0 {
+ sortarr := make([]interface{}, 0)
+ for _, sorter := range s.sorters {
+ sortarr = append(sortarr, sorter.Source())
+ }
+ source["sort"] = sortarr
+ } else if len(s.sorts) > 0 {
+ sortarr := make([]interface{}, 0)
+ for _, sort := range s.sorts {
+ sortarr = append(sortarr, sort.Source())
+ }
+ source["sort"] = sortarr
+ }
+
+ if s.trackScores {
+ source["track_scores"] = s.trackScores
+ }
+
+ if len(s.indexBoosts) > 0 {
+ source["indices_boost"] = s.indexBoosts
+ }
+
+ if len(s.facets) > 0 {
+ facetsMap := make(map[string]interface{})
+ for field, facet := range s.facets {
+ facetsMap[field] = facet.Source()
+ }
+ source["facets"] = facetsMap
+ }
+
+ if len(s.aggregations) > 0 {
+ aggsMap := make(map[string]interface{})
+ for name, aggregate := range s.aggregations {
+ aggsMap[name] = aggregate.Source()
+ }
+ source["aggregations"] = aggsMap
+ }
+
+ if s.highlight != nil {
+ source["highlight"] = s.highlight.Source()
+ }
+
+ if len(s.suggesters) > 0 {
+ suggesters := make(map[string]interface{})
+ for _, s := range s.suggesters {
+ suggesters[s.Name()] = s.Source(false)
+ }
+ if s.globalSuggestText != "" {
+ suggesters["text"] = s.globalSuggestText
+ }
+ source["suggest"] = suggesters
+ }
+
+ if len(s.rescores) > 0 {
+ // Strip empty rescores from request
+ rescores := make([]*Rescore, 0)
+ for _, r := range s.rescores {
+ if !r.IsEmpty() {
+ rescores = append(rescores, r)
+ }
+ }
+
+ if len(rescores) == 1 {
+ rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize
+ source["rescore"] = rescores[0].Source()
+ } else {
+ slice := make([]interface{}, 0)
+ for _, r := range rescores {
+ r.defaultRescoreWindowSize = s.defaultRescoreWindowSize
+ slice = append(slice, r.Source())
+ }
+ source["rescore"] = slice
+ }
+ }
+
+ if len(s.stats) > 0 {
+ source["stats"] = s.stats
+ }
+
+ if len(s.innerHits) > 0 {
+ // Top-level inner hits
+ // See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits
+ // "inner_hits": {
+ // "": {
+ // "": {
+ // "": {
+ // ,
+ // [,"inner_hits" : { []+ } ]?
+ // }
+ // }
+ // },
+ // [,"" : { ... } ]*
+ // }
+ m := make(map[string]interface{})
+ for name, hit := range s.innerHits {
+ if hit.path != "" {
+ path := make(map[string]interface{})
+ path[hit.path] = hit.Source()
+ m[name] = map[string]interface{}{
+ "path": path,
+ }
+ } else if hit.typ != "" {
+ typ := make(map[string]interface{})
+ typ[hit.typ] = hit.Source()
+ m[name] = map[string]interface{}{
+ "type": typ,
+ }
+ } else {
+ // TODO the Java client throws here, because either path or typ must be specified
+ }
+ }
+ source["inner_hits"] = m
+ }
+
+ return source
+}
+
+// -- Script Field --
+
+type ScriptField struct {
+ FieldName string
+
+ script string
+ lang string
+ params map[string]interface{}
+}
+
+func NewScriptField(fieldName, script, lang string, params map[string]interface{}) *ScriptField {
+ return &ScriptField{fieldName, script, lang, params}
+}
+
+func (f *ScriptField) Source() interface{} {
+ source := make(map[string]interface{})
+ source["script"] = f.script
+ if f.lang != "" {
+ source["lang"] = f.lang
+ }
+ if f.params != nil && len(f.params) > 0 {
+ source["params"] = f.params
+ }
+ return source
+}
+
+// -- Partial Field --
+
+type PartialField struct {
+ Name string
+ includes []string
+ excludes []string
+}
+
+func NewPartialField(name string, includes, excludes []string) *PartialField {
+ return &PartialField{name, includes, excludes}
+}
+
+func (f *PartialField) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if f.includes != nil {
+ switch len(f.includes) {
+ case 0:
+ case 1:
+ source["include"] = f.includes[0]
+ default:
+ source["include"] = f.includes
+ }
+ }
+
+ if f.excludes != nil {
+ switch len(f.excludes) {
+ case 0:
+ case 1:
+ source["exclude"] = f.excludes[0]
+ default:
+ source["exclude"] = f.excludes
+ }
+ }
+
+ return source
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort.go
new file mode 100644
index 0000000..b1b54f9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/sort.go
@@ -0,0 +1,487 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- Sorter --
+
+// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html.
+type Sorter interface {
+ Source() interface{}
+}
+
+// -- SortInfo --
+
+// SortInfo contains information about sorting a field.
+type SortInfo struct {
+ Sorter
+ Field string
+ Ascending bool
+ Missing interface{}
+ IgnoreUnmapped *bool
+ SortMode string
+ NestedFilter Filter
+ NestedPath string
+}
+
+func (info SortInfo) Source() interface{} {
+ prop := make(map[string]interface{})
+ if info.Ascending {
+ prop["order"] = "asc"
+ } else {
+ prop["order"] = "desc"
+ }
+ if info.Missing != nil {
+ prop["missing"] = info.Missing
+ }
+ if info.IgnoreUnmapped != nil {
+ prop["ignore_unmapped"] = *info.IgnoreUnmapped
+ }
+ if info.SortMode != "" {
+ prop["sort_mode"] = info.SortMode
+ }
+ if info.NestedFilter != nil {
+ prop["nested_filter"] = info.NestedFilter
+ }
+ if info.NestedPath != "" {
+ prop["nested_path"] = info.NestedPath
+ }
+ source := make(map[string]interface{})
+ source[info.Field] = prop
+ return source
+}
+
+// -- ScoreSort --
+
+// ScoreSort sorts by relevancy score.
+type ScoreSort struct {
+ Sorter
+ ascending bool
+}
+
+// NewScoreSort creates a new ScoreSort.
+func NewScoreSort() ScoreSort {
+ return ScoreSort{ascending: false} // Descending by default!
+}
+
+// Order defines whether sorting ascending or descending (default).
+func (s ScoreSort) Order(ascending bool) ScoreSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s ScoreSort) Asc() ScoreSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s ScoreSort) Desc() ScoreSort {
+ s.ascending = false
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s ScoreSort) Source() interface{} {
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source["_score"] = x
+ if s.ascending {
+ x["reverse"] = true
+ }
+ return source
+}
+
+// -- FieldSort --
+
+// FieldSort sorts by a given field.
+type FieldSort struct {
+ Sorter
+ fieldName string
+ ascending bool
+ missing interface{}
+ ignoreUnmapped *bool
+ unmappedType *string
+ sortMode *string
+ nestedFilter Filter
+ nestedPath *string
+}
+
+// NewFieldSort creates a new FieldSort.
+func NewFieldSort(fieldName string) FieldSort {
+ return FieldSort{
+ fieldName: fieldName,
+ ascending: true,
+ }
+}
+
+// FieldName specifies the name of the field to be used for sorting.
+func (s FieldSort) FieldName(fieldName string) FieldSort {
+ s.fieldName = fieldName
+ return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s FieldSort) Order(ascending bool) FieldSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s FieldSort) Asc() FieldSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s FieldSort) Desc() FieldSort {
+ s.ascending = false
+ return s
+}
+
+// Missing sets the value to be used when a field is missing in a document.
+// You can also use "_last" or "_first" to sort missing last or first
+// respectively.
+func (s FieldSort) Missing(missing interface{}) FieldSort {
+ s.missing = missing
+ return s
+}
+
+// IgnoreUnmapped specifies what happens if the field does not exist in
+// the index. Set it to true to ignore, or set it to false to not ignore (default).
+func (s FieldSort) IgnoreUnmapped(ignoreUnmapped bool) FieldSort {
+ s.ignoreUnmapped = &ignoreUnmapped
+ return s
+}
+
+// UnmappedType sets the type to use when the current field is not mapped
+// in an index.
+func (s FieldSort) UnmappedType(typ string) FieldSort {
+ s.unmappedType = &typ
+ return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s FieldSort) SortMode(sortMode string) FieldSort {
+ s.sortMode = &sortMode
+ return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s FieldSort) NestedFilter(nestedFilter Filter) FieldSort {
+ s.nestedFilter = nestedFilter
+ return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s FieldSort) NestedPath(nestedPath string) FieldSort {
+ s.nestedPath = &nestedPath
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s FieldSort) Source() interface{} {
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source[s.fieldName] = x
+ if s.ascending {
+ x["order"] = "asc"
+ } else {
+ x["order"] = "desc"
+ }
+ if s.missing != nil {
+ x["missing"] = s.missing
+ }
+ if s.ignoreUnmapped != nil {
+ x["ignore_unmapped"] = *s.ignoreUnmapped
+ }
+ if s.unmappedType != nil {
+ x["unmapped_type"] = *s.unmappedType
+ }
+ if s.sortMode != nil {
+ x["mode"] = *s.sortMode
+ }
+ if s.nestedFilter != nil {
+ x["nested_filter"] = s.nestedFilter.Source()
+ }
+ if s.nestedPath != nil {
+ x["nested_path"] = *s.nestedPath
+ }
+ return source
+}
+
+// -- GeoDistanceSort --
+
+// GeoDistanceSort allows for sorting by geographic distance.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting.
+type GeoDistanceSort struct {
+ Sorter
+ fieldName string
+ points []*GeoPoint
+ geohashes []string
+ geoDistance *string
+ unit string
+ ascending bool
+ sortMode *string
+ nestedFilter Filter
+ nestedPath *string
+}
+
+// NewGeoDistanceSort creates a new sorter for geo distances.
+func NewGeoDistanceSort(fieldName string) GeoDistanceSort {
+ return GeoDistanceSort{
+ fieldName: fieldName,
+ points: make([]*GeoPoint, 0),
+ geohashes: make([]string, 0),
+ ascending: true,
+ }
+}
+
+// FieldName specifies the name of the (geo) field to use for sorting.
+func (s GeoDistanceSort) FieldName(fieldName string) GeoDistanceSort {
+ s.fieldName = fieldName
+ return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s GeoDistanceSort) Order(ascending bool) GeoDistanceSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s GeoDistanceSort) Asc() GeoDistanceSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s GeoDistanceSort) Desc() GeoDistanceSort {
+ s.ascending = false
+ return s
+}
+
+// Point specifies a point to create the range distance facets from.
+func (s GeoDistanceSort) Point(lat, lon float64) GeoDistanceSort {
+ s.points = append(s.points, GeoPointFromLatLon(lat, lon))
+ return s
+}
+
+// Points specifies the geo point(s) to create the range distance facets from.
+func (s GeoDistanceSort) Points(points ...*GeoPoint) GeoDistanceSort {
+ s.points = append(s.points, points...)
+ return s
+}
+
+// GeoHashes specifies the geo hash(es) to create the range distance facets from.
+func (s GeoDistanceSort) GeoHashes(geohashes ...string) GeoDistanceSort {
+ s.geohashes = append(s.geohashes, geohashes...)
+ return s
+}
+
+// GeoDistance represents how to compute the distance.
+// It can be sloppy_arc (default), arc, or plane.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#_geo_distance_sorting.
+func (s GeoDistanceSort) GeoDistance(geoDistance string) GeoDistanceSort {
+ s.geoDistance = &geoDistance
+ return s
+}
+
+// Unit specifies the distance unit to use. It defaults to km.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#distance-units
+// for details.
+func (s GeoDistanceSort) Unit(unit string) GeoDistanceSort {
+ s.unit = unit
+ return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min, max, sum, and avg.
+func (s GeoDistanceSort) SortMode(sortMode string) GeoDistanceSort {
+ s.sortMode = &sortMode
+ return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s GeoDistanceSort) NestedFilter(nestedFilter Filter) GeoDistanceSort {
+ s.nestedFilter = nestedFilter
+ return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s GeoDistanceSort) NestedPath(nestedPath string) GeoDistanceSort {
+ s.nestedPath = &nestedPath
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s GeoDistanceSort) Source() interface{} {
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source["_geo_distance"] = x
+
+ // Points
+ ptarr := make([]interface{}, 0)
+ for _, pt := range s.points {
+ ptarr = append(ptarr, pt.Source())
+ }
+ for _, geohash := range s.geohashes {
+ ptarr = append(ptarr, geohash)
+ }
+ x[s.fieldName] = ptarr
+
+ if s.unit != "" {
+ x["unit"] = s.unit
+ }
+ if s.geoDistance != nil {
+ x["distance_type"] = *s.geoDistance
+ }
+
+ if !s.ascending {
+ x["reverse"] = true
+ }
+ if s.sortMode != nil {
+ x["mode"] = *s.sortMode
+ }
+ if s.nestedFilter != nil {
+ x["nested_filter"] = s.nestedFilter.Source()
+ }
+ if s.nestedPath != nil {
+ x["nested_path"] = *s.nestedPath
+ }
+ return source
+}
+
+// -- ScriptSort --
+
+// ScriptSort sorts by a custom script. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting
+// for details about scripting.
+type ScriptSort struct {
+ Sorter
+ lang string
+ script string
+ typ string
+ params map[string]interface{}
+ ascending bool
+ sortMode *string
+ nestedFilter Filter
+ nestedPath *string
+}
+
+// NewScriptSort creates a new ScriptSort.
+func NewScriptSort(script, typ string) ScriptSort {
+ return ScriptSort{
+ script: script,
+ typ: typ,
+ ascending: true,
+ params: make(map[string]interface{}),
+ }
+}
+
+// Lang specifies the script language to use. It can be one of:
+// groovy (the default for ES >= 1.4), mvel (default for ES < 1.4),
+// js, python, expression, or native. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#modules-scripting
+// for details.
+func (s ScriptSort) Lang(lang string) ScriptSort {
+ s.lang = lang
+ return s
+}
+
+// Type sets the script type, which can be either string or number.
+func (s ScriptSort) Type(typ string) ScriptSort {
+ s.typ = typ
+ return s
+}
+
+// Param adds a parameter to the script.
+func (s ScriptSort) Param(name string, value interface{}) ScriptSort {
+ s.params[name] = value
+ return s
+}
+
+// Params sets the parameters of the script.
+func (s ScriptSort) Params(params map[string]interface{}) ScriptSort {
+ s.params = params
+ return s
+}
+
+// Order defines whether sorting ascending (default) or descending.
+func (s ScriptSort) Order(ascending bool) ScriptSort {
+ s.ascending = ascending
+ return s
+}
+
+// Asc sets ascending sort order.
+func (s ScriptSort) Asc() ScriptSort {
+ s.ascending = true
+ return s
+}
+
+// Desc sets descending sort order.
+func (s ScriptSort) Desc() ScriptSort {
+ s.ascending = false
+ return s
+}
+
+// SortMode specifies what values to pick in case a document contains
+// multiple values for the targeted sort field. Possible values are:
+// min or max.
+func (s ScriptSort) SortMode(sortMode string) ScriptSort {
+ s.sortMode = &sortMode
+ return s
+}
+
+// NestedFilter sets a filter that nested objects should match with
+// in order to be taken into account for sorting.
+func (s ScriptSort) NestedFilter(nestedFilter Filter) ScriptSort {
+ s.nestedFilter = nestedFilter
+ return s
+}
+
+// NestedPath is used if sorting occurs on a field that is inside a
+// nested object.
+func (s ScriptSort) NestedPath(nestedPath string) ScriptSort {
+ s.nestedPath = &nestedPath
+ return s
+}
+
+// Source returns the JSON-serializable data.
+func (s ScriptSort) Source() interface{} {
+ source := make(map[string]interface{})
+ x := make(map[string]interface{})
+ source["_script"] = x
+
+ x["script"] = s.script
+ x["type"] = s.typ
+ if !s.ascending {
+ x["reverse"] = true
+ }
+ if s.lang != "" {
+ x["lang"] = s.lang
+ }
+ if len(s.params) > 0 {
+ x["params"] = s.params
+ }
+ if s.sortMode != nil {
+ x["mode"] = *s.sortMode
+ }
+ if s.nestedFilter != nil {
+ x["nested_filter"] = s.nestedFilter.Source()
+ }
+ if s.nestedPath != nil {
+ x["nested_path"] = *s.nestedPath
+ }
+ return source
+}
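+
+// Usage sketch (illustrative only, not part of the library): combining
+// sorters in a search request. Assumes a configured *Client named
+// "client"; the index and field names are hypothetical.
+//
+//	search := client.Search().Index("products").
+//		Query(NewMatchAllQuery()).
+//		SortBy(
+//			NewScoreSort().Desc(),
+//			NewFieldSort("price").Asc().Missing("_last"),
+//		)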
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest.go
new file mode 100644
index 0000000..9b4060c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest.go
@@ -0,0 +1,144 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// SuggestService returns suggestions for text.
+type SuggestService struct {
+ client *Client
+ pretty bool
+ routing string
+ preference string
+ indices []string
+ suggesters []Suggester
+}
+
+func NewSuggestService(client *Client) *SuggestService {
+ builder := &SuggestService{
+ client: client,
+ indices: make([]string, 0),
+ suggesters: make([]Suggester, 0),
+ }
+ return builder
+}
+
+func (s *SuggestService) Index(index string) *SuggestService {
+ s.indices = append(s.indices, index)
+ return s
+}
+
+func (s *SuggestService) Indices(indices ...string) *SuggestService {
+ s.indices = append(s.indices, indices...)
+ return s
+}
+
+func (s *SuggestService) Pretty(pretty bool) *SuggestService {
+ s.pretty = pretty
+ return s
+}
+
+func (s *SuggestService) Routing(routing string) *SuggestService {
+ s.routing = routing
+ return s
+}
+
+func (s *SuggestService) Preference(preference string) *SuggestService {
+ s.preference = preference
+ return s
+}
+
+func (s *SuggestService) Suggester(suggester Suggester) *SuggestService {
+ s.suggesters = append(s.suggesters, suggester)
+ return s
+}
+
+func (s *SuggestService) Do() (SuggestResult, error) {
+ // Build url
+ path := "/"
+
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ if err != nil {
+ return nil, err
+ }
+ indexPart = append(indexPart, index)
+ }
+ path += strings.Join(indexPart, ",")
+
+ // Suggest
+ path += "/_suggest"
+
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ }
+ if s.routing != "" {
+ params.Set("routing", s.routing)
+ }
+ if s.preference != "" {
+ params.Set("preference", s.preference)
+ }
+
+ // Set body
+ body := make(map[string]interface{})
+ for _, s := range s.suggesters {
+ body[s.Name()] = s.Source(false)
+ }
+
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+	// There is a _shards object that cannot be deserialized.
+ // So we use json.RawMessage instead.
+ var suggestions map[string]*json.RawMessage
+ if err := json.Unmarshal(res.Body, &suggestions); err != nil {
+ return nil, err
+ }
+
+ ret := make(SuggestResult)
+ for name, result := range suggestions {
+ if name != "_shards" {
+ var s []Suggestion
+ if err := json.Unmarshal(*result, &s); err != nil {
+ return nil, err
+ }
+ ret[name] = s
+ }
+ }
+
+ return ret, nil
+}
+
+type SuggestResult map[string][]Suggestion
+
+type Suggestion struct {
+ Text string `json:"text"`
+ Offset int `json:"offset"`
+ Length int `json:"length"`
+ Options []suggestionOption `json:"options"`
+}
+
+type suggestionOption struct {
+ Text string `json:"text"`
+ Score float32 `json:"score"`
+ Freq int `json:"freq"`
+ Payload interface{} `json:"payload"`
+}
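+
+// Usage sketch (illustrative only, not part of the library): running a
+// standalone suggest request. Assumes a configured *Client named
+// "client"; the index, field, and suggestion names are hypothetical.
+//
+//	suggester := NewTermSuggester("my-suggestion").
+//		Text("the amsterdma meetpu").
+//		Field("body")
+//	result, err := NewSuggestService(client).Index("twitter").Suggester(suggester).Do()
+//	if err == nil {
+//		for _, suggestion := range result["my-suggestion"] {
+//			_ = suggestion.Options // candidate corrections per input token
+//		}
+//	}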
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_field.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_field.go
new file mode 100644
index 0000000..60f9481
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggest_field.go
@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+)
+
+// SuggestField can be used by the caller to specify a suggest field
+// at index time. For a detailed example, see e.g.
+// http://www.elasticsearch.org/blog/you-complete-me/.
+type SuggestField struct {
+ inputs []string
+ output *string
+ payload interface{}
+ weight int
+}
+
+func NewSuggestField() *SuggestField {
+ return &SuggestField{weight: -1}
+}
+
+func (f *SuggestField) Input(input ...string) *SuggestField {
+ if f.inputs == nil {
+ f.inputs = make([]string, 0)
+ }
+ f.inputs = append(f.inputs, input...)
+ return f
+}
+
+func (f *SuggestField) Output(output string) *SuggestField {
+ f.output = &output
+ return f
+}
+
+func (f *SuggestField) Payload(payload interface{}) *SuggestField {
+ f.payload = payload
+ return f
+}
+
+func (f *SuggestField) Weight(weight int) *SuggestField {
+ f.weight = weight
+ return f
+}
+
+// MarshalJSON encodes SuggestField into JSON.
+func (f *SuggestField) MarshalJSON() ([]byte, error) {
+ source := make(map[string]interface{})
+
+ if f.inputs != nil {
+ switch len(f.inputs) {
+ case 1:
+ source["input"] = f.inputs[0]
+ default:
+ source["input"] = f.inputs
+ }
+ }
+
+ if f.output != nil {
+ source["output"] = *f.output
+ }
+
+ if f.payload != nil {
+ source["payload"] = f.payload
+ }
+
+ if f.weight >= 0 {
+ source["weight"] = f.weight
+ }
+
+ return json.Marshal(source)
+}
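+
+// Usage sketch (illustrative only, not part of the library): attaching a
+// suggest field to a document at index time. The struct and the names
+// used below are hypothetical.
+//
+//	doc := struct {
+//		Name    string        `json:"name"`
+//		Suggest *SuggestField `json:"name_suggest"`
+//	}{
+//		Name: "Hotel Amsterdam",
+//		Suggest: NewSuggestField().
+//			Input("Hotel Amsterdam", "Amsterdam").
+//			Output("Hotel Amsterdam in Amsterdam").
+//			Weight(1),
+//	}
+//	// Index doc as usual; MarshalJSON emits the completion field format.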
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester.go
new file mode 100644
index 0000000..c83d050
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester.go
@@ -0,0 +1,15 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Suggester represents the generic suggester interface.
+// A suggester's only purpose is to return the source of
+// the query as a JSON-serializable object. Returning a
+// map[string]interface{} will do.
+type Suggester interface {
+ Name() string
+ Source(includeName bool) interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion.go
new file mode 100644
index 0000000..e38c38f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion.go
@@ -0,0 +1,121 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CompletionSuggester is a fast suggester for e.g. type-ahead completion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html
+// for more details.
+type CompletionSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+}
+
+// Creates a new completion suggester.
+func NewCompletionSuggester(name string) CompletionSuggester {
+ return CompletionSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ }
+}
+
+func (q CompletionSuggester) Name() string {
+ return q.name
+}
+
+func (q CompletionSuggester) Text(text string) CompletionSuggester {
+ q.text = text
+ return q
+}
+
+func (q CompletionSuggester) Field(field string) CompletionSuggester {
+ q.field = field
+ return q
+}
+
+func (q CompletionSuggester) Analyzer(analyzer string) CompletionSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q CompletionSuggester) Size(size int) CompletionSuggester {
+ q.size = &size
+ return q
+}
+
+func (q CompletionSuggester) ShardSize(shardSize int) CompletionSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q CompletionSuggester) ContextQuery(query SuggesterContextQuery) CompletionSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) CompletionSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+// completionSuggesterRequest is necessary because the order in which
+// the JSON elements are sent to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the completion element.
+type completionSuggesterRequest struct {
+ Text string `json:"text"`
+ Completion interface{} `json:"completion"`
+}
+
+// Creates the source for the completion suggester.
+func (q CompletionSuggester) Source(includeName bool) interface{} {
+ cs := &completionSuggesterRequest{}
+
+ if q.text != "" {
+ cs.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ cs.Completion = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+	// TODO(oe) Add completion-suggester specific parameters here
+
+ if !includeName {
+ return cs
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = cs
+ return source
+}
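+
+// Usage sketch (illustrative only, not part of the library): a type-ahead
+// lookup. Assumes a configured *Client named "client"; the index and
+// field names are hypothetical.
+//
+//	s := NewCompletionSuggester("song-suggest").
+//		Text("n").
+//		Field("name_suggest").
+//		Size(5)
+//	result, err := NewSuggestService(client).Index("music").Suggester(s).Do()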
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy.go
new file mode 100644
index 0000000..3539381
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_completion_fuzzy.go
@@ -0,0 +1,171 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FuzzyCompletionSuggester is a CompletionSuggester that allows fuzzy
+// completion.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html
+// for details, and
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-completion.html#fuzzy
+// for details about the fuzzy completion suggester.
+type FuzzyCompletionSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+
+ fuzziness interface{}
+ fuzzyTranspositions *bool
+ fuzzyMinLength *int
+ fuzzyPrefixLength *int
+ unicodeAware *bool
+}
+
+// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester.
+type Fuzziness struct {
+}
+
+// NewFuzzyCompletionSuggester creates a new fuzzy completion suggester.
+func NewFuzzyCompletionSuggester(name string) FuzzyCompletionSuggester {
+ return FuzzyCompletionSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ }
+}
+
+func (q FuzzyCompletionSuggester) Name() string {
+ return q.name
+}
+
+func (q FuzzyCompletionSuggester) Text(text string) FuzzyCompletionSuggester {
+ q.text = text
+ return q
+}
+
+func (q FuzzyCompletionSuggester) Field(field string) FuzzyCompletionSuggester {
+ q.field = field
+ return q
+}
+
+func (q FuzzyCompletionSuggester) Analyzer(analyzer string) FuzzyCompletionSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q FuzzyCompletionSuggester) Size(size int) FuzzyCompletionSuggester {
+ q.size = &size
+ return q
+}
+
+func (q FuzzyCompletionSuggester) ShardSize(shardSize int) FuzzyCompletionSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) FuzzyCompletionSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) FuzzyCompletionSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+// Fuzziness defines the strategy used to describe what "fuzzy" actually
+// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/common-options.html#fuzziness
+// for a detailed description.
+func (q FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) FuzzyCompletionSuggester {
+ q.fuzziness = fuzziness
+ return q
+}
+
+func (q FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) FuzzyCompletionSuggester {
+ q.fuzzyTranspositions = &fuzzyTranspositions
+ return q
+}
+
+func (q FuzzyCompletionSuggester) FuzzyMinLength(minLength int) FuzzyCompletionSuggester {
+ q.fuzzyMinLength = &minLength
+ return q
+}
+
+func (q FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) FuzzyCompletionSuggester {
+ q.fuzzyPrefixLength = &prefixLength
+ return q
+}
+
+func (q FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) FuzzyCompletionSuggester {
+ q.unicodeAware = &unicodeAware
+ return q
+}
+
+// Source creates the source for the fuzzy completion suggester.
+func (q FuzzyCompletionSuggester) Source(includeName bool) interface{} {
+ cs := &completionSuggesterRequest{}
+
+ if q.text != "" {
+ cs.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ cs.Completion = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+ // Fuzzy Completion Suggester fields
+ fuzzy := make(map[string]interface{})
+ suggester["fuzzy"] = fuzzy
+ if q.fuzziness != nil {
+ fuzzy["fuzziness"] = q.fuzziness
+ }
+ if q.fuzzyTranspositions != nil {
+ fuzzy["transpositions"] = *q.fuzzyTranspositions
+ }
+ if q.fuzzyMinLength != nil {
+ fuzzy["min_length"] = *q.fuzzyMinLength
+ }
+ if q.fuzzyPrefixLength != nil {
+ fuzzy["prefix_length"] = *q.fuzzyPrefixLength
+ }
+ if q.unicodeAware != nil {
+ fuzzy["unicode_aware"] = *q.unicodeAware
+ }
+
+ if !includeName {
+ return cs
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = cs
+ return source
+}
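+
+// Usage sketch (illustrative only, not part of the library): completion
+// that tolerates typos in the input. The names below are hypothetical.
+//
+//	s := NewFuzzyCompletionSuggester("song-suggest").
+//		Text("nvir").
+//		Field("name_suggest").
+//		Fuzziness(2)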
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context.go
new file mode 100644
index 0000000..96d6c9e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context.go
@@ -0,0 +1,11 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SuggesterContextQuery is used to define context information within
+// a suggestion request.
+type SuggesterContextQuery interface {
+ Source() interface{}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category.go
new file mode 100644
index 0000000..1699c7b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_category.go
@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- SuggesterCategoryMapping --
+
+// SuggesterCategoryMapping provides a mapping for a category context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_mapping.
+type SuggesterCategoryMapping struct {
+ name string
+ fieldName string
+ defaultValues []string
+}
+
+// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping.
+func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping {
+ return &SuggesterCategoryMapping{
+ name: name,
+ defaultValues: make([]string, 0),
+ }
+}
+
+func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping {
+ q.defaultValues = append(q.defaultValues, values...)
+ return q
+}
+
+func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping {
+ q.fieldName = fieldName
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterCategoryMapping) Source() interface{} {
+ source := make(map[string]interface{})
+
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ x["type"] = "category"
+
+ switch len(q.defaultValues) {
+ case 0:
+ x["default"] = q.defaultValues
+ case 1:
+ x["default"] = q.defaultValues[0]
+ default:
+ x["default"] = q.defaultValues
+ }
+
+ if q.fieldName != "" {
+ x["path"] = q.fieldName
+ }
+ return source
+}
+
+// -- SuggesterCategoryQuery --
+
+// SuggesterCategoryQuery provides querying a category context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_category_query.
+type SuggesterCategoryQuery struct {
+ name string
+ values []string
+}
+
+// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery.
+func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery {
+ q := &SuggesterCategoryQuery{
+ name: name,
+ values: make([]string, 0),
+ }
+ if len(values) > 0 {
+ q.values = append(q.values, values...)
+ }
+ return q
+}
+
+func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery {
+ q.values = append(q.values, values...)
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterCategoryQuery) Source() interface{} {
+ source := make(map[string]interface{})
+
+ switch len(q.values) {
+ case 0:
+ source[q.name] = q.values
+ case 1:
+ source[q.name] = q.values[0]
+ default:
+ source[q.name] = q.values
+ }
+
+ return source
+}
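+
+// Usage sketch (illustrative only, not part of the library): the mapping
+// side declares the category context; the query side pairs a category
+// query with a completion suggester. The names below are hypothetical.
+//
+//	// Mapping: a context named "color" backed by the "color_field" path.
+//	m := NewSuggesterCategoryMapping("color").
+//		FieldName("color_field").
+//		DefaultValues("red")
+//	// Query: only suggest items whose "color" context is "red".
+//	s := NewCompletionSuggester("item-suggest").
+//		Text("re").
+//		Field("name_suggest").
+//		ContextQuery(NewSuggesterCategoryQuery("color", "red"))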
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo.go
new file mode 100644
index 0000000..116fe9e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_context_geo.go
@@ -0,0 +1,132 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// -- SuggesterGeoMapping --
+
+// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_mapping.
+type SuggesterGeoMapping struct {
+ name string
+ defaultLocations []*GeoPoint
+ precision []string
+ neighbors *bool
+ fieldName string
+}
+
+// NewSuggesterGeoMapping creates a new SuggesterGeoMapping.
+func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping {
+ return &SuggesterGeoMapping{
+ name: name,
+ defaultLocations: make([]*GeoPoint, 0),
+ precision: make([]string, 0),
+ }
+}
+
+func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping {
+ q.defaultLocations = append(q.defaultLocations, locations...)
+ return q
+}
+
+func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping {
+ q.precision = append(q.precision, precision...)
+ return q
+}
+
+func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping {
+ q.neighbors = &neighbors
+ return q
+}
+
+func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping {
+ q.fieldName = fieldName
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterGeoMapping) Source() interface{} {
+ source := make(map[string]interface{})
+
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ x["type"] = "geo"
+
+ if len(q.precision) > 0 {
+ x["precision"] = q.precision
+ }
+ if q.neighbors != nil {
+ x["neighbors"] = *q.neighbors
+ }
+
+ switch len(q.defaultLocations) {
+ case 0:
+ case 1:
+ x["default"] = q.defaultLocations[0].Source()
+ default:
+ arr := make([]interface{}, 0)
+ for _, p := range q.defaultLocations {
+ arr = append(arr, p.Source())
+ }
+ x["default"] = arr
+ }
+
+ if q.fieldName != "" {
+ x["path"] = q.fieldName
+ }
+ return source
+}
+
+// -- SuggesterGeoQuery --
+
+// SuggesterGeoQuery provides querying a geolocation context in a suggester.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/suggester-context.html#_geo_location_query
+type SuggesterGeoQuery struct {
+ name string
+ location *GeoPoint
+ precision []string
+}
+
+// NewSuggesterGeoQuery creates a new SuggesterGeoQuery.
+func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery {
+ return &SuggesterGeoQuery{
+ name: name,
+ location: location,
+ precision: make([]string, 0),
+ }
+}
+
+func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery {
+ q.precision = append(q.precision, precision...)
+ return q
+}
+
+// Source returns a map that will be used to serialize the context query as JSON.
+func (q *SuggesterGeoQuery) Source() interface{} {
+ source := make(map[string]interface{})
+
+ if len(q.precision) == 0 {
+ if q.location != nil {
+ source[q.name] = q.location.Source()
+ }
+ } else {
+ x := make(map[string]interface{})
+ source[q.name] = x
+
+ if q.location != nil {
+ x["value"] = q.location.Source()
+ }
+
+ switch len(q.precision) {
+ case 0:
+ case 1:
+ x["precision"] = q.precision[0]
+ default:
+ x["precision"] = q.precision
+ }
+ }
+
+ return source
+}
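+
+// Usage sketch (illustrative only, not part of the library): restricting
+// completion suggestions to a location. The names below are hypothetical.
+//
+//	pin := GeoPointFromLatLon(52.22, 4.53)
+//	s := NewCompletionSuggester("place-suggest").
+//		Text("am").
+//		Field("name_suggest").
+//		ContextQuery(NewSuggesterGeoQuery("location", pin))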
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase.go
new file mode 100644
index 0000000..d25c4f7
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_phrase.go
@@ -0,0 +1,538 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PhraseSuggester provides an API to request phrase suggestions,
+// e.g. for "did you mean"-style corrections. For more details, see
+// http://www.elasticsearch.org/guide/reference/api/search/phrase-suggest/
+type PhraseSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+
+ // fields specific to a phrase suggester
+ maxErrors *float32
+ separator *string
+ realWordErrorLikelihood *float32
+ confidence *float32
+ generators map[string][]CandidateGenerator
+ gramSize *int
+ smoothingModel SmoothingModel
+ forceUnigrams *bool
+ tokenLimit *int
+ preTag, postTag *string
+ collateQuery *string
+ collateFilter *string
+ collatePreference *string
+ collateParams map[string]interface{}
+ collatePrune *bool
+}
+
+// Creates a new phrase suggester.
+func NewPhraseSuggester(name string) PhraseSuggester {
+ return PhraseSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ collateParams: make(map[string]interface{}),
+ }
+}
+
+func (q PhraseSuggester) Name() string {
+ return q.name
+}
+
+func (q PhraseSuggester) Text(text string) PhraseSuggester {
+ q.text = text
+ return q
+}
+
+func (q PhraseSuggester) Field(field string) PhraseSuggester {
+ q.field = field
+ return q
+}
+
+func (q PhraseSuggester) Analyzer(analyzer string) PhraseSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q PhraseSuggester) Size(size int) PhraseSuggester {
+ q.size = &size
+ return q
+}
+
+func (q PhraseSuggester) ShardSize(shardSize int) PhraseSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q PhraseSuggester) ContextQuery(query SuggesterContextQuery) PhraseSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) PhraseSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+func (q PhraseSuggester) GramSize(gramSize int) PhraseSuggester {
+ if gramSize >= 1 {
+ q.gramSize = &gramSize
+ }
+ return q
+}
+
+func (q PhraseSuggester) MaxErrors(maxErrors float32) PhraseSuggester {
+ q.maxErrors = &maxErrors
+ return q
+}
+
+func (q PhraseSuggester) Separator(separator string) PhraseSuggester {
+ q.separator = &separator
+ return q
+}
+
+func (q PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float32) PhraseSuggester {
+ q.realWordErrorLikelihood = &realWordErrorLikelihood
+ return q
+}
+
+func (q PhraseSuggester) Confidence(confidence float32) PhraseSuggester {
+ q.confidence = &confidence
+ return q
+}
+
+func (q PhraseSuggester) CandidateGenerator(generator CandidateGenerator) PhraseSuggester {
+ if q.generators == nil {
+ q.generators = make(map[string][]CandidateGenerator)
+ }
+ typ := generator.Type()
+ if _, found := q.generators[typ]; !found {
+ q.generators[typ] = make([]CandidateGenerator, 0)
+ }
+ q.generators[typ] = append(q.generators[typ], generator)
+ return q
+}
+
+func (q PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) PhraseSuggester {
+ for _, g := range generators {
+ q = q.CandidateGenerator(g)
+ }
+ return q
+}
+
+func (q PhraseSuggester) ClearCandidateGenerator() PhraseSuggester {
+ q.generators = nil
+ return q
+}
+
+func (q PhraseSuggester) ForceUnigrams(forceUnigrams bool) PhraseSuggester {
+ q.forceUnigrams = &forceUnigrams
+ return q
+}
+
+func (q PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) PhraseSuggester {
+ q.smoothingModel = smoothingModel
+ return q
+}
+
+func (q PhraseSuggester) TokenLimit(tokenLimit int) PhraseSuggester {
+ q.tokenLimit = &tokenLimit
+ return q
+}
+
+func (q PhraseSuggester) Highlight(preTag, postTag string) PhraseSuggester {
+ q.preTag = &preTag
+ q.postTag = &postTag
+ return q
+}
+
+func (q PhraseSuggester) CollateQuery(collateQuery string) PhraseSuggester {
+ q.collateQuery = &collateQuery
+ return q
+}
+
+func (q PhraseSuggester) CollateFilter(collateFilter string) PhraseSuggester {
+ q.collateFilter = &collateFilter
+ return q
+}
+
+func (q PhraseSuggester) CollatePreference(collatePreference string) PhraseSuggester {
+ q.collatePreference = &collatePreference
+ return q
+}
+
+func (q PhraseSuggester) CollateParams(collateParams map[string]interface{}) PhraseSuggester {
+ q.collateParams = collateParams
+ return q
+}
+
+func (q PhraseSuggester) CollatePrune(collatePrune bool) PhraseSuggester {
+ q.collatePrune = &collatePrune
+ return q
+}
+
+// phraseSuggesterRequest is necessary because the order in which
+// the JSON elements are sent to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the phrase element.
+type phraseSuggesterRequest struct {
+ Text string `json:"text"`
+ Phrase interface{} `json:"phrase"`
+}
+
+// Creates the source for the phrase suggester.
+func (q PhraseSuggester) Source(includeName bool) interface{} {
+ ps := &phraseSuggesterRequest{}
+
+ if q.text != "" {
+ ps.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ps.Phrase = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+	// Phrase-specific parameters
+ if q.realWordErrorLikelihood != nil {
+ suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood
+ }
+ if q.confidence != nil {
+ suggester["confidence"] = *q.confidence
+ }
+ if q.separator != nil {
+ suggester["separator"] = *q.separator
+ }
+ if q.maxErrors != nil {
+ suggester["max_errors"] = *q.maxErrors
+ }
+ if q.gramSize != nil {
+ suggester["gram_size"] = *q.gramSize
+ }
+ if q.forceUnigrams != nil {
+ suggester["force_unigrams"] = *q.forceUnigrams
+ }
+ if q.tokenLimit != nil {
+ suggester["token_limit"] = *q.tokenLimit
+ }
+	if len(q.generators) > 0 {
+ for typ, generators := range q.generators {
+ arr := make([]interface{}, 0)
+ for _, g := range generators {
+ arr = append(arr, g.Source())
+ }
+ suggester[typ] = arr
+ }
+ }
+ if q.smoothingModel != nil {
+ x := make(map[string]interface{})
+ x[q.smoothingModel.Type()] = q.smoothingModel.Source()
+ suggester["smoothing"] = x
+ }
+ if q.preTag != nil {
+ hl := make(map[string]string)
+ hl["pre_tag"] = *q.preTag
+ if q.postTag != nil {
+ hl["post_tag"] = *q.postTag
+ }
+ suggester["highlight"] = hl
+ }
+ if q.collateQuery != nil || q.collateFilter != nil {
+ collate := make(map[string]interface{})
+ suggester["collate"] = collate
+ if q.collateQuery != nil {
+ collate["query"] = *q.collateQuery
+ }
+ if q.collateFilter != nil {
+ collate["filter"] = *q.collateFilter
+ }
+ if q.collatePreference != nil {
+ collate["preference"] = *q.collatePreference
+ }
+ if len(q.collateParams) > 0 {
+ collate["params"] = q.collateParams
+ }
+ if q.collatePrune != nil {
+ collate["prune"] = *q.collatePrune
+ }
+ }
+
+ if !includeName {
+ return ps
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ps
+ return source
+}
+
+// -- Smoothing models --
+
+type SmoothingModel interface {
+ Type() string
+ Source() interface{}
+}
+
+// StupidBackoffSmoothingModel implements a stupid backoff smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type StupidBackoffSmoothingModel struct {
+ discount float64
+}
+
+func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel {
+ return &StupidBackoffSmoothingModel{
+ discount: discount,
+ }
+}
+
+func (sm *StupidBackoffSmoothingModel) Type() string {
+ return "stupid_backoff"
+}
+
+func (sm *StupidBackoffSmoothingModel) Source() interface{} {
+ source := make(map[string]interface{})
+ source["discount"] = sm.discount
+ return source
+}
+
+// --
+
+// LaplaceSmoothingModel implements a laplace smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LaplaceSmoothingModel struct {
+ alpha float64
+}
+
+func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel {
+ return &LaplaceSmoothingModel{
+ alpha: alpha,
+ }
+}
+
+func (sm *LaplaceSmoothingModel) Type() string {
+ return "laplace"
+}
+
+func (sm *LaplaceSmoothingModel) Source() interface{} {
+ source := make(map[string]interface{})
+ source["alpha"] = sm.alpha
+ return source
+}
+
+// --
+
+// LinearInterpolationSmoothingModel implements a linear interpolation
+// smoothing model.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type LinearInterpolationSmoothingModel struct {
+	trigramLambda float64
+ bigramLambda float64
+ unigramLambda float64
+}
+
+func NewLinearInterpolationSmoothingModel(trigramLambda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel {
+	return &LinearInterpolationSmoothingModel{
+		trigramLambda: trigramLambda,
+		bigramLambda:  bigramLambda,
+		unigramLambda: unigramLambda,
+ }
+}
+
+func (sm *LinearInterpolationSmoothingModel) Type() string {
+ return "linear_interpolation"
+}
+
+func (sm *LinearInterpolationSmoothingModel) Source() interface{} {
+ source := make(map[string]interface{})
+ source["trigram_lambda"] = sm.trigramLamda
+ source["bigram_lambda"] = sm.bigramLambda
+ source["unigram_lambda"] = sm.unigramLambda
+ return source
+}
+
+// -- CandidateGenerator --
+
+type CandidateGenerator interface {
+ Type() string
+ Source() interface{}
+}
+
+// DirectCandidateGenerator implements a direct candidate generator.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters-phrase.html#_smoothing_models
+// for details about smoothing models.
+type DirectCandidateGenerator struct {
+ field string
+ preFilter *string
+ postFilter *string
+ suggestMode *string
+ accuracy *float64
+ size *int
+ sort *string
+ stringDistance *string
+ maxEdits *int
+ maxInspections *int
+ maxTermFreq *float64
+ prefixLength *int
+ minWordLength *int
+ minDocFreq *float64
+}
+
+func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator {
+ return &DirectCandidateGenerator{
+ field: field,
+ }
+}
+
+func (g *DirectCandidateGenerator) Type() string {
+ return "direct_generator"
+}
+
+func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator {
+ g.field = field
+ return g
+}
+
+func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator {
+ g.preFilter = &preFilter
+ return g
+}
+
+func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator {
+ g.postFilter = &postFilter
+ return g
+}
+
+func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator {
+ g.suggestMode = &suggestMode
+ return g
+}
+
+func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator {
+ g.accuracy = &accuracy
+ return g
+}
+
+func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator {
+ g.size = &size
+ return g
+}
+
+func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator {
+ g.sort = &sort
+ return g
+}
+
+func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator {
+ g.stringDistance = &stringDistance
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator {
+ g.maxEdits = &maxEdits
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator {
+ g.maxInspections = &maxInspections
+ return g
+}
+
+func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator {
+ g.maxTermFreq = &maxTermFreq
+ return g
+}
+
+func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator {
+ g.prefixLength = &prefixLength
+ return g
+}
+
+func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator {
+ g.minWordLength = &minWordLength
+ return g
+}
+
+func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator {
+ g.minDocFreq = &minDocFreq
+ return g
+}
+
+func (g *DirectCandidateGenerator) Source() interface{} {
+ source := make(map[string]interface{})
+ if g.field != "" {
+ source["field"] = g.field
+ }
+ if g.suggestMode != nil {
+ source["suggest_mode"] = *g.suggestMode
+ }
+ if g.accuracy != nil {
+ source["accuracy"] = *g.accuracy
+ }
+ if g.size != nil {
+ source["size"] = *g.size
+ }
+ if g.sort != nil {
+ source["sort"] = *g.sort
+ }
+ if g.stringDistance != nil {
+ source["string_distance"] = *g.stringDistance
+ }
+ if g.maxEdits != nil {
+ source["max_edits"] = *g.maxEdits
+ }
+ if g.maxInspections != nil {
+ source["max_inspections"] = *g.maxInspections
+ }
+ if g.maxTermFreq != nil {
+ source["max_term_freq"] = *g.maxTermFreq
+ }
+ if g.prefixLength != nil {
+ source["prefix_length"] = *g.prefixLength
+ }
+ if g.minWordLength != nil {
+ source["min_word_length"] = *g.minWordLength
+ }
+ if g.minDocFreq != nil {
+ source["min_doc_freq"] = *g.minDocFreq
+ }
+ if g.preFilter != nil {
+ source["pre_filter"] = *g.preFilter
+ }
+ if g.postFilter != nil {
+ source["post_filter"] = *g.postFilter
+ }
+ return source
+}
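+
+// Usage sketch (illustrative only, not part of the library): a
+// "did you mean"-style correction combining a smoothing model, a direct
+// candidate generator, and highlighting. The names below are hypothetical.
+//
+//	s := NewPhraseSuggester("did-you-mean").
+//		Text("noble prize").
+//		Field("body").
+//		SmoothingModel(NewLaplaceSmoothingModel(0.7)).
+//		CandidateGenerator(NewDirectCandidateGenerator("body").SuggestMode("always")).
+//		Highlight("<em>", "</em>")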
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term.go
new file mode 100644
index 0000000..f19484d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/suggester_term.go
@@ -0,0 +1,225 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermSuggester suggests corrections for individual terms based on
+// edit distance. For more details, see
+// http://www.elasticsearch.org/guide/reference/api/search/term-suggest/
+type TermSuggester struct {
+ Suggester
+ name string
+ text string
+ field string
+ analyzer string
+ size *int
+ shardSize *int
+ contextQueries []SuggesterContextQuery
+
+ // fields specific to term suggester
+ suggestMode string
+ accuracy *float32
+ sort string
+ stringDistance string
+ maxEdits *int
+ maxInspections *int
+ maxTermFreq *float32
+ prefixLength *int
+ minWordLength *int
+ minDocFreq *float32
+}
+
+// Creates a new term suggester.
+func NewTermSuggester(name string) TermSuggester {
+ return TermSuggester{
+ name: name,
+ contextQueries: make([]SuggesterContextQuery, 0),
+ }
+}
+
+func (q TermSuggester) Name() string {
+ return q.name
+}
+
+func (q TermSuggester) Text(text string) TermSuggester {
+ q.text = text
+ return q
+}
+
+func (q TermSuggester) Field(field string) TermSuggester {
+ q.field = field
+ return q
+}
+
+func (q TermSuggester) Analyzer(analyzer string) TermSuggester {
+ q.analyzer = analyzer
+ return q
+}
+
+func (q TermSuggester) Size(size int) TermSuggester {
+ q.size = &size
+ return q
+}
+
+func (q TermSuggester) ShardSize(shardSize int) TermSuggester {
+ q.shardSize = &shardSize
+ return q
+}
+
+func (q TermSuggester) ContextQuery(query SuggesterContextQuery) TermSuggester {
+ q.contextQueries = append(q.contextQueries, query)
+ return q
+}
+
+func (q TermSuggester) ContextQueries(queries ...SuggesterContextQuery) TermSuggester {
+ q.contextQueries = append(q.contextQueries, queries...)
+ return q
+}
+
+func (q TermSuggester) SuggestMode(suggestMode string) TermSuggester {
+ q.suggestMode = suggestMode
+ return q
+}
+
+func (q TermSuggester) Accuracy(accuracy float32) TermSuggester {
+ q.accuracy = &accuracy
+ return q
+}
+
+func (q TermSuggester) Sort(sort string) TermSuggester {
+ q.sort = sort
+ return q
+}
+
+func (q TermSuggester) StringDistance(stringDistance string) TermSuggester {
+ q.stringDistance = stringDistance
+ return q
+}
+
+func (q TermSuggester) MaxEdits(maxEdits int) TermSuggester {
+ q.maxEdits = &maxEdits
+ return q
+}
+
+func (q TermSuggester) MaxInspections(maxInspections int) TermSuggester {
+ q.maxInspections = &maxInspections
+ return q
+}
+
+func (q TermSuggester) MaxTermFreq(maxTermFreq float32) TermSuggester {
+ q.maxTermFreq = &maxTermFreq
+ return q
+}
+
+func (q TermSuggester) PrefixLength(prefixLength int) TermSuggester {
+ q.prefixLength = &prefixLength
+ return q
+}
+
+func (q TermSuggester) MinWordLength(minWordLength int) TermSuggester {
+ q.minWordLength = &minWordLength
+ return q
+}
+
+func (q TermSuggester) MinDocFreq(minDocFreq float32) TermSuggester {
+ q.minDocFreq = &minDocFreq
+ return q
+}
+
+// termSuggesterRequest is necessary because the order in which
+// the JSON elements are sent to Elasticsearch is relevant.
+// We got into trouble when using plain maps because the text element
+// needs to go before the term element.
+type termSuggesterRequest struct {
+ Text string `json:"text"`
+ Term interface{} `json:"term"`
+}
+
+// Creates the source for the term suggester.
+func (q TermSuggester) Source(includeName bool) interface{} {
+ // "suggest" : {
+ // "my-suggest-1" : {
+ // "text" : "the amsterdma meetpu",
+ // "term" : {
+ // "field" : "body"
+ // }
+ // },
+ // "my-suggest-2" : {
+ // "text" : "the rottredam meetpu",
+ // "term" : {
+ // "field" : "title",
+ // }
+ // }
+ // }
+ ts := &termSuggesterRequest{}
+ if q.text != "" {
+ ts.Text = q.text
+ }
+
+ suggester := make(map[string]interface{})
+ ts.Term = suggester
+
+ if q.analyzer != "" {
+ suggester["analyzer"] = q.analyzer
+ }
+ if q.field != "" {
+ suggester["field"] = q.field
+ }
+ if q.size != nil {
+ suggester["size"] = *q.size
+ }
+ if q.shardSize != nil {
+ suggester["shard_size"] = *q.shardSize
+ }
+ switch len(q.contextQueries) {
+ case 0:
+ case 1:
+ suggester["context"] = q.contextQueries[0].Source()
+ default:
+ ctxq := make([]interface{}, 0)
+ for _, query := range q.contextQueries {
+ ctxq = append(ctxq, query.Source())
+ }
+ suggester["context"] = ctxq
+ }
+
+ // Specific to term suggester
+ if q.suggestMode != "" {
+ suggester["suggest_mode"] = q.suggestMode
+ }
+ if q.accuracy != nil {
+ suggester["accuracy"] = *q.accuracy
+ }
+ if q.sort != "" {
+ suggester["sort"] = q.sort
+ }
+ if q.stringDistance != "" {
+ suggester["string_distance"] = q.stringDistance
+ }
+ if q.maxEdits != nil {
+ suggester["max_edits"] = *q.maxEdits
+ }
+ if q.maxInspections != nil {
+ suggester["max_inspections"] = *q.maxInspections
+ }
+ if q.maxTermFreq != nil {
+ suggester["max_term_freq"] = *q.maxTermFreq
+ }
+ if q.prefixLength != nil {
+ suggester["prefix_len"] = *q.prefixLength
+ }
+ if q.minWordLength != nil {
+ suggester["min_word_len"] = *q.minWordLength
+ }
+ if q.minDocFreq != nil {
+ suggester["min_doc_freq"] = *q.minDocFreq
+ }
+
+ if !includeName {
+ return ts
+ }
+
+ source := make(map[string]interface{})
+ source[q.name] = ts
+ return source
+}
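+
+// Usage sketch (illustrative only, not part of the library): per-term
+// spelling corrections. The names below are hypothetical.
+//
+//	s := NewTermSuggester("my-suggest-1").
+//		Text("the amsterdma meetpu").
+//		Field("body").
+//		SuggestMode("missing").
+//		MaxEdits(2)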
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update.go
new file mode 100644
index 0000000..d2595a4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/update.go
@@ -0,0 +1,342 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "gopkg.in/olivere/elastic.v2/uritemplates"
+)
+
+// UpdateResult is the result of updating a document in Elasticsearch.
+type UpdateResult struct {
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Version int `json:"_version"`
+ Created bool `json:"created"`
+ GetResult *GetResult `json:"get"`
+}
+
+// UpdateService updates a document in Elasticsearch.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-update.html
+// for details.
+type UpdateService struct {
+ client *Client
+ index string
+ typ string
+ id string
+ routing string
+ parent string
+ script string
+ scriptId string
+ scriptFile string
+ scriptType string
+ scriptLang string
+ scriptParams map[string]interface{}
+ fields []string
+ version *int64
+ versionType string
+ retryOnConflict *int
+ refresh *bool
+ replicationType string
+ consistencyLevel string
+ upsert interface{}
+ scriptedUpsert *bool
+ docAsUpsert *bool
+ detectNoop *bool
+ doc interface{}
+ timeout string
+ pretty bool
+}
+
+// NewUpdateService creates the service to update documents in Elasticsearch.
+func NewUpdateService(client *Client) *UpdateService {
+ builder := &UpdateService{
+ client: client,
+ scriptParams: make(map[string]interface{}),
+ fields: make([]string, 0),
+ }
+ return builder
+}
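+
+// Usage sketch (illustrative only, not part of the library): a partial
+// update that falls back to inserting the document when it does not
+// exist yet. Assumes a configured *Client named "client"; the index,
+// type, and id are hypothetical.
+//
+//	res, err := NewUpdateService(client).
+//		Index("tweets").Type("tweet").Id("1").
+//		Doc(map[string]interface{}{"retweets": 0}).
+//		DocAsUpsert(true).
+//		Do()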
+
+// Index is the name of the Elasticsearch index (required).
+func (b *UpdateService) Index(name string) *UpdateService {
+ b.index = name
+ return b
+}
+
+// Type is the type of the document (required).
+func (b *UpdateService) Type(typ string) *UpdateService {
+ b.typ = typ
+ return b
+}
+
+// Id is the identifier of the document to update (required).
+func (b *UpdateService) Id(id string) *UpdateService {
+ b.id = id
+ return b
+}
+
+// Routing sets a specific routing value.
+func (b *UpdateService) Routing(routing string) *UpdateService {
+ b.routing = routing
+ return b
+}
+
+// Parent sets the id of the parent document.
+func (b *UpdateService) Parent(parent string) *UpdateService {
+ b.parent = parent
+ return b
+}
+
+// Script is the URL-encoded script definition.
+func (b *UpdateService) Script(script string) *UpdateService {
+ b.script = script
+ return b
+}
+
+// ScriptId is the id of a stored script.
+func (b *UpdateService) ScriptId(scriptId string) *UpdateService {
+ b.scriptId = scriptId
+ return b
+}
+
+// ScriptFile is the file name of a stored script.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html for details.
+func (b *UpdateService) ScriptFile(scriptFile string) *UpdateService {
+ b.scriptFile = scriptFile
+ return b
+}
+
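+// ScriptType sets the type of script to execute.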
+func (b *UpdateService) ScriptType(scriptType string) *UpdateService {
+ b.scriptType = scriptType
+ return b
+}
+
+// ScriptLang defines the scripting language (default: groovy).
+func (b *UpdateService) ScriptLang(scriptLang string) *UpdateService {
+ b.scriptLang = scriptLang
+ return b
+}
+
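+// ScriptParams sets the parameters that are passed to the script.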
+func (b *UpdateService) ScriptParams(params map[string]interface{}) *UpdateService {
+ b.scriptParams = params
+ return b
+}
+
+// RetryOnConflict specifies how many times the operation should be retried
+// when a conflict occurs (default: 0).
+func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService {
+ b.retryOnConflict = &retryOnConflict
+ return b
+}
+
+// Fields is a list of fields to return in the response.
+func (b *UpdateService) Fields(fields ...string) *UpdateService {
+ b.fields = make([]string, 0, len(fields))
+ b.fields = append(b.fields, fields...)
+ return b
+}
+
+// Version defines the explicit version number for concurrency control.
+func (b *UpdateService) Version(version int64) *UpdateService {
+ b.version = &version
+ return b
+}
+
+// VersionType is one of "internal" or "force".
+func (b *UpdateService) VersionType(versionType string) *UpdateService {
+ b.versionType = versionType
+ return b
+}
+
+// Refresh the index after performing the update.
+func (b *UpdateService) Refresh(refresh bool) *UpdateService {
+ b.refresh = &refresh
+ return b
+}
+
+// ReplicationType is one of "sync" or "async".
+func (b *UpdateService) ReplicationType(replicationType string) *UpdateService {
+ b.replicationType = replicationType
+ return b
+}
+
+// ConsistencyLevel is one of "one", "quorum", or "all".
+// It sets the write consistency setting for the update operation.
+func (b *UpdateService) ConsistencyLevel(consistencyLevel string) *UpdateService {
+ b.consistencyLevel = consistencyLevel
+ return b
+}
+
+// Doc allows for updating a partial document.
+func (b *UpdateService) Doc(doc interface{}) *UpdateService {
+ b.doc = doc
+ return b
+}
+
+// Upsert can be used to index the document when it doesn't exist yet.
+// Use this e.g. to initialize a document with a default value.
+func (b *UpdateService) Upsert(doc interface{}) *UpdateService {
+ b.upsert = doc
+ return b
+}
+
+// DocAsUpsert can be used to insert the document if it doesn't already exist.
+func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService {
+ b.docAsUpsert = &docAsUpsert
+ return b
+}
+
+// DetectNoop will instruct Elasticsearch to check if changes will occur
+// when updating via Doc. If there aren't any changes, the request will
+// turn into a no-op.
+func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService {
+ b.detectNoop = &detectNoop
+ return b
+}
+
+// ScriptedUpsert should be set to true if the referenced script
+// (defined in Script or ScriptId) should be called to perform an insert.
+// The default is false.
+func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService {
+ b.scriptedUpsert = &scriptedUpsert
+ return b
+}
+
+// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms".
+func (b *UpdateService) Timeout(timeout string) *UpdateService {
+ b.timeout = timeout
+ return b
+}
+
+// Pretty instructs Elasticsearch to return human-readable, prettified JSON.
+func (b *UpdateService) Pretty(pretty bool) *UpdateService {
+ b.pretty = pretty
+ return b
+}
+
+// url returns the URL part of the document request.
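+// For example, index "wiki", type "page", and id "1" (hypothetical values)
+// expand to "/wiki/page/1/_update", plus any query-string parameters.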
+func (b *UpdateService) url() (string, url.Values, error) {
+ // Build url
+ path := "/{index}/{type}/{id}/_update"
+ path, err := uritemplates.Expand(path, map[string]string{
+ "index": b.index,
+ "type": b.typ,
+ "id": b.id,
+ })
+ if err != nil {
+ return "", url.Values{}, err
+ }
+
+ // Parameters
+ params := make(url.Values)
+ if b.pretty {
+ params.Set("pretty", "true")
+ }
+ if b.routing != "" {
+ params.Set("routing", b.routing)
+ }
+ if b.parent != "" {
+ params.Set("parent", b.parent)
+ }
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ }
+ if b.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *b.refresh))
+ }
+ if b.replicationType != "" {
+ params.Set("replication", b.replicationType)
+ }
+ if b.consistencyLevel != "" {
+ params.Set("consistency", b.consistencyLevel)
+ }
+ if len(b.fields) > 0 {
+ params.Set("fields", strings.Join(b.fields, ","))
+ }
+ if b.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *b.version))
+ }
+ if b.versionType != "" {
+ params.Set("version_type", b.versionType)
+ }
+ if b.retryOnConflict != nil {
+ params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict))
+ }
+
+ return path, params, nil
+}
+
+// body returns the body part of the document request.
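+// For a partial update it produces JSON such as (illustrative):
+//
+//	{"doc": {"title": "Home"}, "doc_as_upsert": true}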
+func (b *UpdateService) body() (interface{}, error) {
+ source := make(map[string]interface{})
+
+ if b.script != "" {
+ source["script"] = b.script
+ }
+ if b.scriptId != "" {
+ source["script_id"] = b.scriptId
+ }
+ if b.scriptFile != "" {
+ source["script_file"] = b.scriptFile
+ }
+ if b.scriptLang != "" {
+ source["lang"] = b.scriptLang
+ }
+ if len(b.scriptParams) > 0 {
+ source["params"] = b.scriptParams
+ }
+ if b.scriptedUpsert != nil {
+ source["scripted_upsert"] = *b.scriptedUpsert
+ }
+
+ if b.upsert != nil {
+ source["upsert"] = b.upsert
+ }
+
+ if b.doc != nil {
+ source["doc"] = b.doc
+ }
+ if b.docAsUpsert != nil {
+ source["doc_as_upsert"] = *b.docAsUpsert
+ }
+ if b.detectNoop != nil {
+ source["detect_noop"] = *b.detectNoop
+ }
+
+ return source, nil
+}
+
+// Do executes the update operation.
+func (b *UpdateService) Do() (*UpdateResult, error) {
+ path, params, err := b.url()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get body of the request
+ body, err := b.body()
+ if err != nil {
+ return nil, err
+ }
+
+ // Get response
+ res, err := b.client.PerformRequest("POST", path, params, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return result
+ ret := new(UpdateResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/LICENSE b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/LICENSE
new file mode 100644
index 0000000..de9c88c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013 Joshua Tacoma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/uritemplates.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/uritemplates.go
new file mode 100644
index 0000000..8a84813
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/uritemplates.go
@@ -0,0 +1,359 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 4 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+// values := make(map[string]interface{})
+// values["user"] = "jtacoma"
+// values["repo"] = "uritemplates"
+// expanded, _ := template.Expand(values)
+// fmt.Println(expanded)
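+// // expanded == "https://api.github.com/repos/jtacoma/uritemplates"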
+//
+package uritemplates
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+ reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+ validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+ hex = []byte("0123456789ABCDEF")
+)
+
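+// pctEncode percent-encodes every byte of src; e.g. "a" becomes "%61".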
+func pctEncode(src []byte) []byte {
+ dst := make([]byte, len(src)*3)
+ for i, b := range src {
+ buf := dst[i*3 : i*3+3]
+ buf[0] = 0x25
+ buf[1] = hex[b/16]
+ buf[2] = hex[b%16]
+ }
+ return dst
+}
+
+func escape(s string, allowReserved bool) (escaped string) {
+ if allowReserved {
+ escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+ } else {
+ escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+ }
+ return escaped
+}
+
+// A UriTemplate is a parsed representation of a URI template.
+type UriTemplate struct {
+ raw string
+ parts []templatePart
+}
+
+// Parse parses a URI template string into a UriTemplate object.
+func Parse(rawtemplate string) (template *UriTemplate, err error) {
+ template = new(UriTemplate)
+ template.raw = rawtemplate
+ split := strings.Split(rawtemplate, "{")
+ template.parts = make([]templatePart, len(split)*2-1)
+ for i, s := range split {
+ if i == 0 {
+ if strings.Contains(s, "}") {
+ err = errors.New("unexpected }")
+ break
+ }
+ template.parts[i].raw = s
+ } else {
+ subsplit := strings.Split(s, "}")
+ if len(subsplit) != 2 {
+ err = errors.New("malformed template")
+ break
+ }
+ expression := subsplit[0]
+ template.parts[i*2-1], err = parseExpression(expression)
+ if err != nil {
+ break
+ }
+ template.parts[i*2].raw = subsplit[1]
+ }
+ }
+ if err != nil {
+ template = nil
+ }
+ return template, err
+}
+
+type templatePart struct {
+ raw string
+ terms []templateTerm
+ first string
+ sep string
+ named bool
+ ifemp string
+ allowReserved bool
+}
+
+type templateTerm struct {
+ name string
+ explode bool
+ truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+ switch expression[0] {
+ case '+':
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ case '.':
+ result.first = "."
+ result.sep = "."
+ expression = expression[1:]
+ case '/':
+ result.first = "/"
+ result.sep = "/"
+ expression = expression[1:]
+ case ';':
+ result.first = ";"
+ result.sep = ";"
+ result.named = true
+ expression = expression[1:]
+ case '?':
+ result.first = "?"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
+
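+// parseTerm parses a single variable specifier, e.g. "user" (plain),
+// "user*" (explode modifier), or "user:3" (prefix modifier) per RFC 6570.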
+func parseTerm(term string) (result templateTerm, err error) {
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
+func (self *UriTemplate) Expand(value interface{}) (string, error) {
+ values, ismap := value.(map[string]interface{})
+ if !ismap {
+ if m, ismap := struct2map(value); !ismap {
+ return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
+ } else {
+ return self.Expand(m)
+ }
+ }
+ var buf bytes.Buffer
+ for _, p := range self.parts {
+ err := p.expand(&buf, values)
+ if err != nil {
+ return "", err
+ }
+ }
+ return buf.String(), nil
+}
+
+func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
+ if len(self.raw) > 0 {
+ buf.WriteString(self.raw)
+ return nil
+ }
+ var zeroLen = buf.Len()
+ buf.WriteString(self.first)
+ var firstLen = buf.Len()
+ for _, term := range self.terms {
+ value, exists := values[term.name]
+ if !exists {
+ continue
+ }
+ if buf.Len() != firstLen {
+ buf.WriteString(self.sep)
+ }
+ switch v := value.(type) {
+ case string:
+ self.expandString(buf, term, v)
+ case []interface{}:
+ self.expandArray(buf, term, v)
+ case map[string]interface{}:
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, v)
+ default:
+ if m, ismap := struct2map(value); ismap {
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, m)
+ } else {
+ str := fmt.Sprintf("%v", value)
+ self.expandString(buf, term, str)
+ }
+ }
+ }
+ if buf.Len() == firstLen {
+ original := buf.Bytes()[:zeroLen]
+ buf.Reset()
+ buf.Write(original)
+ }
+ return nil
+}
+
+func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
+ if self.named {
+ buf.WriteString(name)
+ if empty {
+ buf.WriteString(self.ifemp)
+ } else {
+ buf.WriteString("=")
+ }
+ }
+}
+
+func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ self.expandName(buf, t.name, len(s) == 0)
+ buf.WriteString(escape(s, self.allowReserved))
+}
+
+func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
+ if len(a) == 0 {
+ return
+ } else if !t.explode {
+ self.expandName(buf, t.name, false)
+ }
+ for i, value := range a {
+ if t.explode && i > 0 {
+ buf.WriteString(self.sep)
+ } else if i > 0 {
+ buf.WriteString(",")
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ if self.named && t.explode {
+ self.expandName(buf, t.name, len(s) == 0)
+ }
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+}
+
+func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
+ if len(m) == 0 {
+ return
+ }
+ if !t.explode {
+ self.expandName(buf, t.name, len(m) == 0)
+ }
+ var firstLen = buf.Len()
+ for k, value := range m {
+ if firstLen != buf.Len() {
+ if t.explode {
+ buf.WriteString(self.sep)
+ } else {
+ buf.WriteString(",")
+ }
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if t.explode {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune('=')
+ buf.WriteString(escape(s, self.allowReserved))
+ } else {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune(',')
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+ }
+}
+
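+// struct2map converts a struct (or a pointer to one) into a
+// map[string]interface{}, preferring the "uri" field tag as the key.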
+func struct2map(v interface{}) (map[string]interface{}, bool) {
+ value := reflect.ValueOf(v)
+ switch value.Type().Kind() {
+ case reflect.Ptr:
+ return struct2map(value.Elem().Interface())
+ case reflect.Struct:
+ m := make(map[string]interface{})
+ for i := 0; i < value.NumField(); i++ {
+ tag := value.Type().Field(i).Tag
+ var name string
+ if strings.Contains(string(tag), ":") {
+ name = tag.Get("uri")
+ } else {
+ name = strings.TrimSpace(string(tag))
+ }
+ if len(name) == 0 {
+ name = value.Type().Field(i).Name
+ }
+ m[name] = value.Field(i).Interface()
+ }
+ return m, true
+ }
+ return nil, false
+}
diff --git a/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils.go b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils.go
new file mode 100644
index 0000000..399ef46
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/olivere/elastic.v2/uritemplates/utils.go
@@ -0,0 +1,13 @@
+package uritemplates
+
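+// Expand parses path as a URI template and expands it with the given
+// string map; e.g. Expand("/{index}", map[string]string{"index": "wiki"})
+// yields "/wiki".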
+func Expand(path string, expansions map[string]string) (string, error) {
+ template, err := Parse(path)
+ if err != nil {
+ return "", err
+ }
+ values := make(map[string]interface{})
+ for k, v := range expansions {
+ values[k] = v
+ }
+ return template.Expand(values)
+}
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..37d3b66
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,79 @@
+GIT_COMMIT = $(shell git rev-parse HEAD)
+BUILD_TIME = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ" | tr -d '\n')
+
+ASSETS=template static
+CUR_DIR=$(shell pwd)
+GOPATH=$(CUR_DIR)/build/
+
+PARENT_PKG=amuz.es/go/
+NAME=wiki
+PKG=$(PARENT_PKG)$(NAME)
+
+usage:
+ @echo ""
+ @echo "Task : Description"
+ @echo "----------------- : -------------------"
+ @echo "make setup : Install all necessary dependencies"
+ @echo "make dev : Generate development build"
+ @echo "make test : Run tests"
+ @echo "make build : Generate production build for current OS"
+ @echo "make bootstrap : Install cross-compilation toolchain"
+ @echo "make release : Generate binaries for all supported OSes"
+ @echo "make test : Execute test suite"
+ @echo "make test-all : Execute test suite on multiple PG versions"
+ @echo "make clean : Remove all build files and reset assets"
+ @echo "make assets : Generate production assets file"
+ @echo "make dev-assets : Generate development assets file"
+ @echo ""
+
+test:
+ godep go test -cover ./...
+
+test-all:
+ @./script/test_all.sh
+
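+# assets-% is a pattern rule: e.g. "make assets-template" serializes
+# asset/template/ into bind/template/handler.go via go-bindata.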
+assets-%: asset/%/
+ @echo "serialize $*"
+ @go-bindata -o bind/$*/handler.go -pkg $* -ignore=[.]gitignore -ignore=[.]gitkeep -prefix asset/$*/ $(BINDATA_OPTS) $<...
+
+assets: $(addprefix assets-,${ASSETS})
+
+dev-assets:
+ @$(MAKE) --no-print-directory assets BINDATA_OPTS="-debug"
+
+dev:
+ godep go build
+ @echo "You can now execute ./$(NAME)"
+
+dependency-save:
+ cd $(GOPATH)src/$(PKG) && go get && godep save && cd $(CUR_DIR)
+
+dependency-restore:
+ cd $(GOPATH)src/$(PKG) && godep restore && cd $(CUR_DIR)
+
+
+build: setup assets
+ godep go build
+ @echo "You can now execute ./$(NAME)"
+
+release: assets
+ @$(MAKE) --no-print-directory dependency-save
+ @echo "Building binaries..."
+
+ @echo "\nPackaging binaries...\n"
+ @./script/package.sh
+
+setup:
+ go get github.com/tools/godep
+ go get golang.org/x/tools/cmd/cover
+ godep get github.com/jteeuwen/go-bindata/...
+
+ mkdir -p $(GOPATH)src/$(PARENT_PKG)
+ ln -nsf $(CUR_DIR) $(GOPATH)src/$(PARENT_PKG)$(NAME)
+
+ @$(MAKE) --no-print-directory dependency-restore
+
+clean:
+ rm -f ./$(NAME)
+ rm -rf ./build
+ rm -rf ./bind/*
diff --git a/asset/static/css/animate.min.css b/asset/static/css/animate.min.css
new file mode 100755
index 0000000..b23c93a
--- /dev/null
+++ b/asset/static/css/animate.min.css
@@ -0,0 +1,11 @@
+@charset "UTF-8";
+
+/*!
+ * animate.css - http://daneden.me/animate
+ * Version - 3.5.1
+ * Licensed under the MIT license - http://opensource.org/licenses/MIT
+ *
+ * Copyright (c) 2016 Daniel Eden
+ */
+
+.animated{-webkit-animation-duration:1s;animation-duration:1s;-webkit-animation-fill-mode:both;animation-fill-mode:both}.animated.infinite{-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}.animated.hinge{-webkit-animation-duration:2s;animation-duration:2s}.animated.bounceIn,.animated.bounceOut,.animated.flipOutX,.animated.flipOutY{-webkit-animation-duration:.75s;animation-duration:.75s}@-webkit-keyframes bounce{0%,20%,53%,80%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1);-webkit-transform:translateZ(0);transform:translateZ(0)}40%,43%{-webkit-transform:translate3d(0,-30px,0);transform:translate3d(0,-30px,0)}40%,43%,70%{-webkit-animation-timing-function:cubic-bezier(.755,.05,.855,.06);animation-timing-function:cubic-bezier(.755,.05,.855,.06)}70%{-webkit-transform:translate3d(0,-15px,0);transform:translate3d(0,-15px,0)}90%{-webkit-transform:translate3d(0,-4px,0);transform:translate3d(0,-4px,0)}}@keyframes bounce{0%,20%,53%,80%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1);-webkit-transform:translateZ(0);transform:translateZ(0)}40%,43%{-webkit-transform:translate3d(0,-30px,0);transform:translate3d(0,-30px,0)}40%,43%,70%{-webkit-animation-timing-function:cubic-bezier(.755,.05,.855,.06);animation-timing-function:cubic-bezier(.755,.05,.855,.06)}70%{-webkit-transform:translate3d(0,-15px,0);transform:translate3d(0,-15px,0)}90%{-webkit-transform:translate3d(0,-4px,0);transform:translate3d(0,-4px,0)}}.bounce{-webkit-animation-name:bounce;animation-name:bounce;-webkit-transform-origin:center bottom;transform-origin:center bottom}@-webkit-keyframes flash{0%,50%,to{opacity:1}25%,75%{opacity:0}}@keyframes flash{0%,50%,to{opacity:1}25%,75%{opacity:0}}.flash{-webkit-animation-name:flash;animation-name:flash}@-webkit-keyframes pulse{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes pulse{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}50%{-webkit-transform:scale3d(1.05,1.05,1.05);transform:scale3d(1.05,1.05,1.05)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}.pulse{-webkit-animation-name:pulse;animation-name:pulse}@-webkit-keyframes rubberBand{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}30%{-webkit-transform:scale3d(1.25,.75,1);transform:scale3d(1.25,.75,1)}40%{-webkit-transform:scale3d(.75,1.25,1);transform:scale3d(.75,1.25,1)}50%{-webkit-transform:scale3d(1.15,.85,1);transform:scale3d(1.15,.85,1)}65%{-webkit-transform:scale3d(.95,1.05,1);transform:scale3d(.95,1.05,1)}75%{-webkit-transform:scale3d(1.05,.95,1);transform:scale3d(1.05,.95,1)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes rubberBand{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}30%{-webkit-transform:scale3d(1.25,.75,1);transform:scale3d(1.25,.75,1)}40%{-webkit-transform:scale3d(.75,1.25,1);transform:scale3d(.75,1.25,1)}50%{-webkit-transform:scale3d(1.15,.85,1);transform:scale3d(1.15,.85,1)}65%{-webkit-transform:scale3d(.95,1.05,1);transform:scale3d(.95,1.05,1)}75%{-webkit-transform:scale3d(1.05,.95,1);transform:scale3d(1.05,.95,1)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}.rubberBand{-webkit-animation-name:rubberBand;animation-name:rubberBand}@-webkit-keyframes 
shake{0%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}10%,30%,50%,70%,90%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}20%,40%,60%,80%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}}@keyframes shake{0%,to{-webkit-transform:translateZ(0);transform:translateZ(0)}10%,30%,50%,70%,90%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}20%,40%,60%,80%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}}.shake{-webkit-animation-name:shake;animation-name:shake}@-webkit-keyframes headShake{0%{-webkit-transform:translateX(0);transform:translateX(0)}6.5%{-webkit-transform:translateX(-6px) rotateY(-9deg);transform:translateX(-6px) rotateY(-9deg)}18.5%{-webkit-transform:translateX(5px) rotateY(7deg);transform:translateX(5px) rotateY(7deg)}31.5%{-webkit-transform:translateX(-3px) rotateY(-5deg);transform:translateX(-3px) rotateY(-5deg)}43.5%{-webkit-transform:translateX(2px) rotateY(3deg);transform:translateX(2px) rotateY(3deg)}50%{-webkit-transform:translateX(0);transform:translateX(0)}}@keyframes headShake{0%{-webkit-transform:translateX(0);transform:translateX(0)}6.5%{-webkit-transform:translateX(-6px) rotateY(-9deg);transform:translateX(-6px) rotateY(-9deg)}18.5%{-webkit-transform:translateX(5px) rotateY(7deg);transform:translateX(5px) rotateY(7deg)}31.5%{-webkit-transform:translateX(-3px) rotateY(-5deg);transform:translateX(-3px) rotateY(-5deg)}43.5%{-webkit-transform:translateX(2px) rotateY(3deg);transform:translateX(2px) rotateY(3deg)}50%{-webkit-transform:translateX(0);transform:translateX(0)}}.headShake{-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;-webkit-animation-name:headShake;animation-name:headShake}@-webkit-keyframes swing{20%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}40%{-webkit-transform:rotate(-10deg);transform:rotate(-10deg)}60%{-webkit-transform:rotate(5deg);transform:rotate(5deg)}80%{-webkit-transform:rotate(-5deg);transform:rotate(-5deg)}to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@keyframes swing{20%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}40%{-webkit-transform:rotate(-10deg);transform:rotate(-10deg)}60%{-webkit-transform:rotate(5deg);transform:rotate(5deg)}80%{-webkit-transform:rotate(-5deg);transform:rotate(-5deg)}to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}.swing{-webkit-transform-origin:top center;transform-origin:top center;-webkit-animation-name:swing;animation-name:swing}@-webkit-keyframes tada{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9) rotate(-3deg);transform:scale3d(.9,.9,.9) rotate(-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate(3deg);transform:scale3d(1.1,1.1,1.1) rotate(3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate(-3deg);transform:scale3d(1.1,1.1,1.1) rotate(-3deg)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes tada{0%{-webkit-transform:scaleX(1);transform:scaleX(1)}10%,20%{-webkit-transform:scale3d(.9,.9,.9) rotate(-3deg);transform:scale3d(.9,.9,.9) rotate(-3deg)}30%,50%,70%,90%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate(3deg);transform:scale3d(1.1,1.1,1.1) rotate(3deg)}40%,60%,80%{-webkit-transform:scale3d(1.1,1.1,1.1) rotate(-3deg);transform:scale3d(1.1,1.1,1.1) rotate(-3deg)}to{-webkit-transform:scaleX(1);transform:scaleX(1)}}.tada{-webkit-animation-name:tada;animation-name:tada}@-webkit-keyframes 
wobble{0%{-webkit-transform:none;transform:none}15%{-webkit-transform:translate3d(-25%,0,0) rotate(-5deg);transform:translate3d(-25%,0,0) rotate(-5deg)}30%{-webkit-transform:translate3d(20%,0,0) rotate(3deg);transform:translate3d(20%,0,0) rotate(3deg)}45%{-webkit-transform:translate3d(-15%,0,0) rotate(-3deg);transform:translate3d(-15%,0,0) rotate(-3deg)}60%{-webkit-transform:translate3d(10%,0,0) rotate(2deg);transform:translate3d(10%,0,0) rotate(2deg)}75%{-webkit-transform:translate3d(-5%,0,0) rotate(-1deg);transform:translate3d(-5%,0,0) rotate(-1deg)}to{-webkit-transform:none;transform:none}}@keyframes wobble{0%{-webkit-transform:none;transform:none}15%{-webkit-transform:translate3d(-25%,0,0) rotate(-5deg);transform:translate3d(-25%,0,0) rotate(-5deg)}30%{-webkit-transform:translate3d(20%,0,0) rotate(3deg);transform:translate3d(20%,0,0) rotate(3deg)}45%{-webkit-transform:translate3d(-15%,0,0) rotate(-3deg);transform:translate3d(-15%,0,0) rotate(-3deg)}60%{-webkit-transform:translate3d(10%,0,0) rotate(2deg);transform:translate3d(10%,0,0) rotate(2deg)}75%{-webkit-transform:translate3d(-5%,0,0) rotate(-1deg);transform:translate3d(-5%,0,0) rotate(-1deg)}to{-webkit-transform:none;transform:none}}.wobble{-webkit-animation-name:wobble;animation-name:wobble}@-webkit-keyframes jello{0%,11.1%,to{-webkit-transform:none;transform:none}22.2%{-webkit-transform:skewX(-12.5deg) skewY(-12.5deg);transform:skewX(-12.5deg) skewY(-12.5deg)}33.3%{-webkit-transform:skewX(6.25deg) skewY(6.25deg);transform:skewX(6.25deg) skewY(6.25deg)}44.4%{-webkit-transform:skewX(-3.125deg) skewY(-3.125deg);transform:skewX(-3.125deg) skewY(-3.125deg)}55.5%{-webkit-transform:skewX(1.5625deg) skewY(1.5625deg);transform:skewX(1.5625deg) skewY(1.5625deg)}66.6%{-webkit-transform:skewX(-.78125deg) skewY(-.78125deg);transform:skewX(-.78125deg) skewY(-.78125deg)}77.7%{-webkit-transform:skewX(.390625deg) skewY(.390625deg);transform:skewX(.390625deg) skewY(.390625deg)}88.8%{-webkit-transform:skewX(-.1953125deg) skewY(-.1953125deg);transform:skewX(-.1953125deg) skewY(-.1953125deg)}}@keyframes jello{0%,11.1%,to{-webkit-transform:none;transform:none}22.2%{-webkit-transform:skewX(-12.5deg) skewY(-12.5deg);transform:skewX(-12.5deg) skewY(-12.5deg)}33.3%{-webkit-transform:skewX(6.25deg) skewY(6.25deg);transform:skewX(6.25deg) skewY(6.25deg)}44.4%{-webkit-transform:skewX(-3.125deg) skewY(-3.125deg);transform:skewX(-3.125deg) skewY(-3.125deg)}55.5%{-webkit-transform:skewX(1.5625deg) skewY(1.5625deg);transform:skewX(1.5625deg) skewY(1.5625deg)}66.6%{-webkit-transform:skewX(-.78125deg) skewY(-.78125deg);transform:skewX(-.78125deg) skewY(-.78125deg)}77.7%{-webkit-transform:skewX(.390625deg) skewY(.390625deg);transform:skewX(.390625deg) skewY(.390625deg)}88.8%{-webkit-transform:skewX(-.1953125deg) skewY(-.1953125deg);transform:skewX(-.1953125deg) skewY(-.1953125deg)}}.jello{-webkit-animation-name:jello;animation-name:jello;-webkit-transform-origin:center;transform-origin:center}@-webkit-keyframes 
bounceIn{0%,20%,40%,60%,80%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}to{opacity:1;-webkit-transform:scaleX(1);transform:scaleX(1)}}@keyframes bounceIn{0%,20%,40%,60%,80%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}20%{-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}40%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}60%{opacity:1;-webkit-transform:scale3d(1.03,1.03,1.03);transform:scale3d(1.03,1.03,1.03)}80%{-webkit-transform:scale3d(.97,.97,.97);transform:scale3d(.97,.97,.97)}to{opacity:1;-webkit-transform:scaleX(1);transform:scaleX(1)}}.bounceIn{-webkit-animation-name:bounceIn;animation-name:bounceIn}@-webkit-keyframes bounceInDown{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,-3000px,0);transform:translate3d(0,-3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,25px,0);transform:translate3d(0,25px,0)}75%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}90%{-webkit-transform:translate3d(0,5px,0);transform:translate3d(0,5px,0)}to{-webkit-transform:none;transform:none}}@keyframes bounceInDown{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,-3000px,0);transform:translate3d(0,-3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,25px,0);transform:translate3d(0,25px,0)}75%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}90%{-webkit-transform:translate3d(0,5px,0);transform:translate3d(0,5px,0)}to{-webkit-transform:none;transform:none}}.bounceInDown{-webkit-animation-name:bounceInDown;animation-name:bounceInDown}@-webkit-keyframes bounceInLeft{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(-3000px,0,0);transform:translate3d(-3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(25px,0,0);transform:translate3d(25px,0,0)}75%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}90%{-webkit-transform:translate3d(5px,0,0);transform:translate3d(5px,0,0)}to{-webkit-transform:none;transform:none}}@keyframes 
bounceInLeft{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(-3000px,0,0);transform:translate3d(-3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(25px,0,0);transform:translate3d(25px,0,0)}75%{-webkit-transform:translate3d(-10px,0,0);transform:translate3d(-10px,0,0)}90%{-webkit-transform:translate3d(5px,0,0);transform:translate3d(5px,0,0)}to{-webkit-transform:none;transform:none}}.bounceInLeft{-webkit-animation-name:bounceInLeft;animation-name:bounceInLeft}@-webkit-keyframes bounceInRight{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(3000px,0,0);transform:translate3d(3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(-25px,0,0);transform:translate3d(-25px,0,0)}75%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}90%{-webkit-transform:translate3d(-5px,0,0);transform:translate3d(-5px,0,0)}to{-webkit-transform:none;transform:none}}@keyframes bounceInRight{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(3000px,0,0);transform:translate3d(3000px,0,0)}60%{opacity:1;-webkit-transform:translate3d(-25px,0,0);transform:translate3d(-25px,0,0)}75%{-webkit-transform:translate3d(10px,0,0);transform:translate3d(10px,0,0)}90%{-webkit-transform:translate3d(-5px,0,0);transform:translate3d(-5px,0,0)}to{-webkit-transform:none;transform:none}}.bounceInRight{-webkit-animation-name:bounceInRight;animation-name:bounceInRight}@-webkit-keyframes bounceInUp{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,3000px,0);transform:translate3d(0,3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}75%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}90%{-webkit-transform:translate3d(0,-5px,0);transform:translate3d(0,-5px,0)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes bounceInUp{0%,60%,75%,90%,to{-webkit-animation-timing-function:cubic-bezier(.215,.61,.355,1);animation-timing-function:cubic-bezier(.215,.61,.355,1)}0%{opacity:0;-webkit-transform:translate3d(0,3000px,0);transform:translate3d(0,3000px,0)}60%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}75%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}90%{-webkit-transform:translate3d(0,-5px,0);transform:translate3d(0,-5px,0)}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.bounceInUp{-webkit-animation-name:bounceInUp;animation-name:bounceInUp}@-webkit-keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}to{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}@keyframes bounceOut{20%{-webkit-transform:scale3d(.9,.9,.9);transform:scale3d(.9,.9,.9)}50%,55%{opacity:1;-webkit-transform:scale3d(1.1,1.1,1.1);transform:scale3d(1.1,1.1,1.1)}to{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}}.bounceOut{-webkit-animation-name:bounceOut;animation-name:bounceOut}@-webkit-keyframes 
bounceOutDown{20%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}to{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}@keyframes bounceOutDown{20%{-webkit-transform:translate3d(0,10px,0);transform:translate3d(0,10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,-20px,0);transform:translate3d(0,-20px,0)}to{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}.bounceOutDown{-webkit-animation-name:bounceOutDown;animation-name:bounceOutDown}@-webkit-keyframes bounceOutLeft{20%{opacity:1;-webkit-transform:translate3d(20px,0,0);transform:translate3d(20px,0,0)}to{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}@keyframes bounceOutLeft{20%{opacity:1;-webkit-transform:translate3d(20px,0,0);transform:translate3d(20px,0,0)}to{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}.bounceOutLeft{-webkit-animation-name:bounceOutLeft;animation-name:bounceOutLeft}@-webkit-keyframes bounceOutRight{20%{opacity:1;-webkit-transform:translate3d(-20px,0,0);transform:translate3d(-20px,0,0)}to{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}@keyframes bounceOutRight{20%{opacity:1;-webkit-transform:translate3d(-20px,0,0);transform:translate3d(-20px,0,0)}to{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}.bounceOutRight{-webkit-animation-name:bounceOutRight;animation-name:bounceOutRight}@-webkit-keyframes bounceOutUp{20%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,20px,0);transform:translate3d(0,20px,0)}to{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}@keyframes bounceOutUp{20%{-webkit-transform:translate3d(0,-10px,0);transform:translate3d(0,-10px,0)}40%,45%{opacity:1;-webkit-transform:translate3d(0,20px,0);transform:translate3d(0,20px,0)}to{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}.bounceOutUp{-webkit-animation-name:bounceOutUp;animation-name:bounceOutUp}@-webkit-keyframes fadeIn{0%{opacity:0}to{opacity:1}}@keyframes fadeIn{0%{opacity:0}to{opacity:1}}.fadeIn{-webkit-animation-name:fadeIn;animation-name:fadeIn}@-webkit-keyframes fadeInDown{0%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInDown{0%{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInDown{-webkit-animation-name:fadeInDown;animation-name:fadeInDown}@-webkit-keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInDownBig{0%{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInDownBig{-webkit-animation-name:fadeInDownBig;animation-name:fadeInDownBig}@-webkit-keyframes fadeInLeft{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes 
fadeInLeft{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInLeft{-webkit-animation-name:fadeInLeft;animation-name:fadeInLeft}@-webkit-keyframes fadeInLeftBig{0%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInLeftBig{0%{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInLeftBig{-webkit-animation-name:fadeInLeftBig;animation-name:fadeInLeftBig}@-webkit-keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInRight{0%{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInRight{-webkit-animation-name:fadeInRight;animation-name:fadeInRight}@-webkit-keyframes fadeInRightBig{0%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInRightBig{0%{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInRightBig{-webkit-animation-name:fadeInRightBig;animation-name:fadeInRightBig}@-webkit-keyframes fadeInUp{0%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInUp{0%{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInUp{-webkit-animation-name:fadeInUp;animation-name:fadeInUp}@-webkit-keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes fadeInUpBig{0%{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}to{opacity:1;-webkit-transform:none;transform:none}}.fadeInUpBig{-webkit-animation-name:fadeInUpBig;animation-name:fadeInUpBig}@-webkit-keyframes fadeOut{0%{opacity:1}to{opacity:0}}@keyframes fadeOut{0%{opacity:1}to{opacity:0}}.fadeOut{-webkit-animation-name:fadeOut;animation-name:fadeOut}@-webkit-keyframes fadeOutDown{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}@keyframes fadeOutDown{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}.fadeOutDown{-webkit-animation-name:fadeOutDown;animation-name:fadeOutDown}@-webkit-keyframes fadeOutDownBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}@keyframes fadeOutDownBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,2000px,0);transform:translate3d(0,2000px,0)}}.fadeOutDownBig{-webkit-animation-name:fadeOutDownBig;animation-name:fadeOutDownBig}@-webkit-keyframes fadeOutLeft{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}@keyframes fadeOutLeft{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}.fadeOutLeft{-webkit-animation-name:fadeOutLeft;animation-name:fadeOutLeft}@-webkit-keyframes 
fadeOutLeftBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}@keyframes fadeOutLeftBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(-2000px,0,0);transform:translate3d(-2000px,0,0)}}.fadeOutLeftBig{-webkit-animation-name:fadeOutLeftBig;animation-name:fadeOutLeftBig}@-webkit-keyframes fadeOutRight{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}@keyframes fadeOutRight{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}.fadeOutRight{-webkit-animation-name:fadeOutRight;animation-name:fadeOutRight}@-webkit-keyframes fadeOutRightBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}@keyframes fadeOutRightBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(2000px,0,0);transform:translate3d(2000px,0,0)}}.fadeOutRightBig{-webkit-animation-name:fadeOutRightBig;animation-name:fadeOutRightBig}@-webkit-keyframes fadeOutUp{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}@keyframes fadeOutUp{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}.fadeOutUp{-webkit-animation-name:fadeOutUp;animation-name:fadeOutUp}@-webkit-keyframes fadeOutUpBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}@keyframes fadeOutUpBig{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(0,-2000px,0);transform:translate3d(0,-2000px,0)}}.fadeOutUpBig{-webkit-animation-name:fadeOutUpBig;animation-name:fadeOutUpBig}@-webkit-keyframes flip{0%{-webkit-transform:perspective(400px) rotateY(-1turn);transform:perspective(400px) rotateY(-1turn)}0%,40%{-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}40%{-webkit-transform:perspective(400px) translateZ(150px) rotateY(-190deg);transform:perspective(400px) translateZ(150px) rotateY(-190deg)}50%{-webkit-transform:perspective(400px) translateZ(150px) rotateY(-170deg);transform:perspective(400px) translateZ(150px) rotateY(-170deg)}50%,80%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}80%{-webkit-transform:perspective(400px) scale3d(.95,.95,.95);transform:perspective(400px) scale3d(.95,.95,.95)}to{-webkit-transform:perspective(400px);transform:perspective(400px);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}}@keyframes flip{0%{-webkit-transform:perspective(400px) rotateY(-1turn);transform:perspective(400px) rotateY(-1turn)}0%,40%{-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}40%{-webkit-transform:perspective(400px) translateZ(150px) rotateY(-190deg);transform:perspective(400px) translateZ(150px) rotateY(-190deg)}50%{-webkit-transform:perspective(400px) translateZ(150px) rotateY(-170deg);transform:perspective(400px) translateZ(150px) rotateY(-170deg)}50%,80%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}80%{-webkit-transform:perspective(400px) scale3d(.95,.95,.95);transform:perspective(400px) scale3d(.95,.95,.95)}to{-webkit-transform:perspective(400px);transform:perspective(400px);-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}}.animated.flip{-webkit-backface-visibility:visible;backface-visibility:visible;-webkit-animation-name:flip;animation-name:flip}@-webkit-keyframes flipInX{0%{-webkit-transform:perspective(400px) 
rotateX(90deg);transform:perspective(400px) rotateX(90deg);opacity:0}0%,40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}40%{-webkit-transform:perspective(400px) rotateX(-20deg);transform:perspective(400px) rotateX(-20deg)}60%{-webkit-transform:perspective(400px) rotateX(10deg);transform:perspective(400px) rotateX(10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotateX(-5deg);transform:perspective(400px) rotateX(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes flipInX{0%{-webkit-transform:perspective(400px) rotateX(90deg);transform:perspective(400px) rotateX(90deg);opacity:0}0%,40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}40%{-webkit-transform:perspective(400px) rotateX(-20deg);transform:perspective(400px) rotateX(-20deg)}60%{-webkit-transform:perspective(400px) rotateX(10deg);transform:perspective(400px) rotateX(10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotateX(-5deg);transform:perspective(400px) rotateX(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}.flipInX{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipInX;animation-name:flipInX}@-webkit-keyframes flipInY{0%{-webkit-transform:perspective(400px) rotateY(90deg);transform:perspective(400px) rotateY(90deg);opacity:0}0%,40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}40%{-webkit-transform:perspective(400px) rotateY(-20deg);transform:perspective(400px) rotateY(-20deg)}60%{-webkit-transform:perspective(400px) rotateY(10deg);transform:perspective(400px) rotateY(10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotateY(-5deg);transform:perspective(400px) rotateY(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}@keyframes flipInY{0%{-webkit-transform:perspective(400px) rotateY(90deg);transform:perspective(400px) rotateY(90deg);opacity:0}0%,40%{-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}40%{-webkit-transform:perspective(400px) rotateY(-20deg);transform:perspective(400px) rotateY(-20deg)}60%{-webkit-transform:perspective(400px) rotateY(10deg);transform:perspective(400px) rotateY(10deg);opacity:1}80%{-webkit-transform:perspective(400px) rotateY(-5deg);transform:perspective(400px) rotateY(-5deg)}to{-webkit-transform:perspective(400px);transform:perspective(400px)}}.flipInY{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipInY;animation-name:flipInY}@-webkit-keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotateX(-20deg);transform:perspective(400px) rotateX(-20deg);opacity:1}to{-webkit-transform:perspective(400px) rotateX(90deg);transform:perspective(400px) rotateX(90deg);opacity:0}}@keyframes flipOutX{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotateX(-20deg);transform:perspective(400px) rotateX(-20deg);opacity:1}to{-webkit-transform:perspective(400px) rotateX(90deg);transform:perspective(400px) rotateX(90deg);opacity:0}}.flipOutX{-webkit-animation-name:flipOutX;animation-name:flipOutX;-webkit-backface-visibility:visible!important;backface-visibility:visible!important}@-webkit-keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) 
rotateY(-15deg);transform:perspective(400px) rotateY(-15deg);opacity:1}to{-webkit-transform:perspective(400px) rotateY(90deg);transform:perspective(400px) rotateY(90deg);opacity:0}}@keyframes flipOutY{0%{-webkit-transform:perspective(400px);transform:perspective(400px)}30%{-webkit-transform:perspective(400px) rotateY(-15deg);transform:perspective(400px) rotateY(-15deg);opacity:1}to{-webkit-transform:perspective(400px) rotateY(90deg);transform:perspective(400px) rotateY(90deg);opacity:0}}.flipOutY{-webkit-backface-visibility:visible!important;backface-visibility:visible!important;-webkit-animation-name:flipOutY;animation-name:flipOutY}@-webkit-keyframes lightSpeedIn{0%{-webkit-transform:translate3d(100%,0,0) skewX(-30deg);transform:translate3d(100%,0,0) skewX(-30deg);opacity:0}60%{-webkit-transform:skewX(20deg);transform:skewX(20deg)}60%,80%{opacity:1}80%{-webkit-transform:skewX(-5deg);transform:skewX(-5deg)}to{-webkit-transform:none;transform:none;opacity:1}}@keyframes lightSpeedIn{0%{-webkit-transform:translate3d(100%,0,0) skewX(-30deg);transform:translate3d(100%,0,0) skewX(-30deg);opacity:0}60%{-webkit-transform:skewX(20deg);transform:skewX(20deg)}60%,80%{opacity:1}80%{-webkit-transform:skewX(-5deg);transform:skewX(-5deg)}to{-webkit-transform:none;transform:none;opacity:1}}.lightSpeedIn{-webkit-animation-name:lightSpeedIn;animation-name:lightSpeedIn;-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}@-webkit-keyframes lightSpeedOut{0%{opacity:1}to{-webkit-transform:translate3d(100%,0,0) skewX(30deg);transform:translate3d(100%,0,0) skewX(30deg);opacity:0}}@keyframes lightSpeedOut{0%{opacity:1}to{-webkit-transform:translate3d(100%,0,0) skewX(30deg);transform:translate3d(100%,0,0) skewX(30deg);opacity:0}}.lightSpeedOut{-webkit-animation-name:lightSpeedOut;animation-name:lightSpeedOut;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}@-webkit-keyframes rotateIn{0%{transform-origin:center;-webkit-transform:rotate(-200deg);transform:rotate(-200deg);opacity:0}0%,to{-webkit-transform-origin:center}to{transform-origin:center;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateIn{0%{transform-origin:center;-webkit-transform:rotate(-200deg);transform:rotate(-200deg);opacity:0}0%,to{-webkit-transform-origin:center}to{transform-origin:center;-webkit-transform:none;transform:none;opacity:1}}.rotateIn{-webkit-animation-name:rotateIn;animation-name:rotateIn}@-webkit-keyframes rotateInDownLeft{0%{transform-origin:left bottom;-webkit-transform:rotate(-45deg);transform:rotate(-45deg);opacity:0}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInDownLeft{0%{transform-origin:left bottom;-webkit-transform:rotate(-45deg);transform:rotate(-45deg);opacity:0}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInDownLeft{-webkit-animation-name:rotateInDownLeft;animation-name:rotateInDownLeft}@-webkit-keyframes rotateInDownRight{0%{transform-origin:right bottom;-webkit-transform:rotate(45deg);transform:rotate(45deg);opacity:0}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInDownRight{0%{transform-origin:right bottom;-webkit-transform:rotate(45deg);transform:rotate(45deg);opacity:0}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right 
bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInDownRight{-webkit-animation-name:rotateInDownRight;animation-name:rotateInDownRight}@-webkit-keyframes rotateInUpLeft{0%{transform-origin:left bottom;-webkit-transform:rotate(45deg);transform:rotate(45deg);opacity:0}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInUpLeft{0%{transform-origin:left bottom;-webkit-transform:rotate(45deg);transform:rotate(45deg);opacity:0}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInUpLeft{-webkit-animation-name:rotateInUpLeft;animation-name:rotateInUpLeft}@-webkit-keyframes rotateInUpRight{0%{transform-origin:right bottom;-webkit-transform:rotate(-90deg);transform:rotate(-90deg);opacity:0}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}@keyframes rotateInUpRight{0%{transform-origin:right bottom;-webkit-transform:rotate(-90deg);transform:rotate(-90deg);opacity:0}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right bottom;-webkit-transform:none;transform:none;opacity:1}}.rotateInUpRight{-webkit-animation-name:rotateInUpRight;animation-name:rotateInUpRight}@-webkit-keyframes rotateOut{0%{transform-origin:center;opacity:1}0%,to{-webkit-transform-origin:center}to{transform-origin:center;-webkit-transform:rotate(200deg);transform:rotate(200deg);opacity:0}}@keyframes rotateOut{0%{transform-origin:center;opacity:1}0%,to{-webkit-transform-origin:center}to{transform-origin:center;-webkit-transform:rotate(200deg);transform:rotate(200deg);opacity:0}}.rotateOut{-webkit-animation-name:rotateOut;animation-name:rotateOut}@-webkit-keyframes rotateOutDownLeft{0%{transform-origin:left bottom;opacity:1}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:rotate(45deg);transform:rotate(45deg);opacity:0}}@keyframes rotateOutDownLeft{0%{transform-origin:left bottom;opacity:1}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:rotate(45deg);transform:rotate(45deg);opacity:0}}.rotateOutDownLeft{-webkit-animation-name:rotateOutDownLeft;animation-name:rotateOutDownLeft}@-webkit-keyframes rotateOutDownRight{0%{transform-origin:right bottom;opacity:1}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right bottom;-webkit-transform:rotate(-45deg);transform:rotate(-45deg);opacity:0}}@keyframes rotateOutDownRight{0%{transform-origin:right bottom;opacity:1}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right bottom;-webkit-transform:rotate(-45deg);transform:rotate(-45deg);opacity:0}}.rotateOutDownRight{-webkit-animation-name:rotateOutDownRight;animation-name:rotateOutDownRight}@-webkit-keyframes rotateOutUpLeft{0%{transform-origin:left bottom;opacity:1}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:rotate(-45deg);transform:rotate(-45deg);opacity:0}}@keyframes rotateOutUpLeft{0%{transform-origin:left bottom;opacity:1}0%,to{-webkit-transform-origin:left bottom}to{transform-origin:left bottom;-webkit-transform:rotate(-45deg);transform:rotate(-45deg);opacity:0}}.rotateOutUpLeft{-webkit-animation-name:rotateOutUpLeft;animation-name:rotateOutUpLeft}@-webkit-keyframes rotateOutUpRight{0%{transform-origin:right bottom;opacity:1}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right 
bottom;-webkit-transform:rotate(90deg);transform:rotate(90deg);opacity:0}}@keyframes rotateOutUpRight{0%{transform-origin:right bottom;opacity:1}0%,to{-webkit-transform-origin:right bottom}to{transform-origin:right bottom;-webkit-transform:rotate(90deg);transform:rotate(90deg);opacity:0}}.rotateOutUpRight{-webkit-animation-name:rotateOutUpRight;animation-name:rotateOutUpRight}@-webkit-keyframes hinge{0%{transform-origin:top left}0%,20%,60%{-webkit-transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-transform:rotate(80deg);transform:rotate(80deg);transform-origin:top left}40%,80%{-webkit-transform:rotate(60deg);transform:rotate(60deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;opacity:1}to{-webkit-transform:translate3d(0,700px,0);transform:translate3d(0,700px,0);opacity:0}}@keyframes hinge{0%{transform-origin:top left}0%,20%,60%{-webkit-transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}20%,60%{-webkit-transform:rotate(80deg);transform:rotate(80deg);transform-origin:top left}40%,80%{-webkit-transform:rotate(60deg);transform:rotate(60deg);-webkit-transform-origin:top left;transform-origin:top left;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;opacity:1}to{-webkit-transform:translate3d(0,700px,0);transform:translate3d(0,700px,0);opacity:0}}.hinge{-webkit-animation-name:hinge;animation-name:hinge}@-webkit-keyframes rollIn{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0) rotate(-120deg);transform:translate3d(-100%,0,0) rotate(-120deg)}to{opacity:1;-webkit-transform:none;transform:none}}@keyframes rollIn{0%{opacity:0;-webkit-transform:translate3d(-100%,0,0) rotate(-120deg);transform:translate3d(-100%,0,0) rotate(-120deg)}to{opacity:1;-webkit-transform:none;transform:none}}.rollIn{-webkit-animation-name:rollIn;animation-name:rollIn}@-webkit-keyframes rollOut{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(100%,0,0) rotate(120deg);transform:translate3d(100%,0,0) rotate(120deg)}}@keyframes rollOut{0%{opacity:1}to{opacity:0;-webkit-transform:translate3d(100%,0,0) rotate(120deg);transform:translate3d(100%,0,0) rotate(120deg)}}.rollOut{-webkit-animation-name:rollOut;animation-name:rollOut}@-webkit-keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}@keyframes zoomIn{0%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%{opacity:1}}.zoomIn{-webkit-animation-name:zoomIn;animation-name:zoomIn}@-webkit-keyframes zoomInDown{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}@keyframes zoomInDown{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-1000px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) 
translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}.zoomInDown{-webkit-animation-name:zoomInDown;animation-name:zoomInDown}@-webkit-keyframes zoomInLeft{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(10px,0,0);transform:scale3d(.475,.475,.475) translate3d(10px,0,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}@keyframes zoomInLeft{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(-1000px,0,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(10px,0,0);transform:scale3d(.475,.475,.475) translate3d(10px,0,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}.zoomInLeft{-webkit-animation-name:zoomInLeft;animation-name:zoomInLeft}@-webkit-keyframes zoomInRight{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}@keyframes zoomInRight{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);transform:scale3d(.1,.1,.1) translate3d(1000px,0,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);transform:scale3d(.475,.475,.475) translate3d(-10px,0,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}.zoomInRight{-webkit-animation-name:zoomInRight;animation-name:zoomInRight}@-webkit-keyframes zoomInUp{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}@keyframes zoomInUp{0%{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);transform:scale3d(.1,.1,.1) translate3d(0,1000px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}60%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) 
translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}.zoomInUp{-webkit-animation-name:zoomInUp;animation-name:zoomInUp}@-webkit-keyframes zoomOut{0%{opacity:1}50%{-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%,to{opacity:0}}@keyframes zoomOut{0%{opacity:1}50%{-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}50%,to{opacity:0}}.zoomOut{-webkit-animation-name:zoomOut;animation-name:zoomOut}@-webkit-keyframes zoomOutDown{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}to{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}@keyframes zoomOutDown{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);transform:scale3d(.475,.475,.475) translate3d(0,-60px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}to{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}.zoomOutDown{-webkit-animation-name:zoomOutDown;animation-name:zoomOutDown}@-webkit-keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(42px,0,0);transform:scale3d(.475,.475,.475) translate3d(42px,0,0)}to{opacity:0;-webkit-transform:scale(.1) translate3d(-2000px,0,0);transform:scale(.1) translate3d(-2000px,0,0);-webkit-transform-origin:left center;transform-origin:left center}}@keyframes zoomOutLeft{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(42px,0,0);transform:scale3d(.475,.475,.475) translate3d(42px,0,0)}to{opacity:0;-webkit-transform:scale(.1) translate3d(-2000px,0,0);transform:scale(.1) translate3d(-2000px,0,0);-webkit-transform-origin:left center;transform-origin:left center}}.zoomOutLeft{-webkit-animation-name:zoomOutLeft;animation-name:zoomOutLeft}@-webkit-keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-42px,0,0);transform:scale3d(.475,.475,.475) translate3d(-42px,0,0)}to{opacity:0;-webkit-transform:scale(.1) translate3d(2000px,0,0);transform:scale(.1) translate3d(2000px,0,0);-webkit-transform-origin:right center;transform-origin:right center}}@keyframes zoomOutRight{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(-42px,0,0);transform:scale3d(.475,.475,.475) translate3d(-42px,0,0)}to{opacity:0;-webkit-transform:scale(.1) translate3d(2000px,0,0);transform:scale(.1) translate3d(2000px,0,0);-webkit-transform-origin:right center;transform-origin:right center}}.zoomOutRight{-webkit-animation-name:zoomOutRight;animation-name:zoomOutRight}@-webkit-keyframes zoomOutUp{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) 
translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}to{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}@keyframes zoomOutUp{40%{opacity:1;-webkit-transform:scale3d(.475,.475,.475) translate3d(0,60px,0);transform:scale3d(.475,.475,.475) translate3d(0,60px,0);-webkit-animation-timing-function:cubic-bezier(.55,.055,.675,.19);animation-timing-function:cubic-bezier(.55,.055,.675,.19)}to{opacity:0;-webkit-transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);transform:scale3d(.1,.1,.1) translate3d(0,-2000px,0);-webkit-transform-origin:center bottom;transform-origin:center bottom;-webkit-animation-timing-function:cubic-bezier(.175,.885,.32,1);animation-timing-function:cubic-bezier(.175,.885,.32,1)}}.zoomOutUp{-webkit-animation-name:zoomOutUp;animation-name:zoomOutUp}@-webkit-keyframes slideInDown{0%{-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInDown{0%{-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.slideInDown{-webkit-animation-name:slideInDown;animation-name:slideInDown}@-webkit-keyframes slideInLeft{0%{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInLeft{0%{-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.slideInLeft{-webkit-animation-name:slideInLeft;animation-name:slideInLeft}@-webkit-keyframes slideInRight{0%{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInRight{0%{-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.slideInRight{-webkit-animation-name:slideInRight;animation-name:slideInRight}@-webkit-keyframes slideInUp{0%{-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}@keyframes slideInUp{0%{-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0);visibility:visible}to{-webkit-transform:translateZ(0);transform:translateZ(0)}}.slideInUp{-webkit-animation-name:slideInUp;animation-name:slideInUp}@-webkit-keyframes slideOutDown{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}@keyframes slideOutDown{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(0,100%,0);transform:translate3d(0,100%,0)}}.slideOutDown{-webkit-animation-name:slideOutDown;animation-name:slideOutDown}@-webkit-keyframes slideOutLeft{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}@keyframes 
slideOutLeft{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}}.slideOutLeft{-webkit-animation-name:slideOutLeft;animation-name:slideOutLeft}@-webkit-keyframes slideOutRight{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}@keyframes slideOutRight{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}}.slideOutRight{-webkit-animation-name:slideOutRight;animation-name:slideOutRight}@-webkit-keyframes slideOutUp{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}@keyframes slideOutUp{0%{-webkit-transform:translateZ(0);transform:translateZ(0)}to{visibility:hidden;-webkit-transform:translate3d(0,-100%,0);transform:translate3d(0,-100%,0)}}.slideOutUp{-webkit-animation-name:slideOutUp;animation-name:slideOutUp}
diff --git a/asset/static/css/bootstrap.min.css b/asset/static/css/bootstrap.min.css
new file mode 100755
index 0000000..4cf729e
--- /dev/null
+++ b/asset/static/css/bootstrap.min.css
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.3.6 (http://getbootstrap.com)
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ *//*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,menu,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background-color:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */@media print{*,:after,:before{color:#000!important;text-shadow:none!important;background:0 0!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}blockquote,pre{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}.navbar{display:none}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff2) format('woff2'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\002a"}.glyphicon-plus:before{content:"\002b"}.glyphicon-eur:before,.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}
.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:be
fore{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.glyphicon-cd:before{content:"\e201"}.glyphicon-save-file:before{content:"\e202"}.glyphicon-open-file:before{content:"\e203"}.glyphicon-level-up:before{content:"\e204"}.glyphicon-copy:before{content:"\e205"}.glyphicon-paste:before{content:"\e206"}.glyphicon-alert:before{content:"\e209"}.glyphicon-equalizer:before{content:"\e210"}.glyphicon-king:before{content:"\e211
"}.glyphicon-queen:before{content:"\e212"}.glyphicon-pawn:before{content:"\e213"}.glyphicon-bishop:before{content:"\e214"}.glyphicon-knight:before{content:"\e215"}.glyphicon-baby-formula:before{content:"\e216"}.glyphicon-tent:before{content:"\26fa"}.glyphicon-blackboard:before{content:"\e218"}.glyphicon-bed:before{content:"\e219"}.glyphicon-apple:before{content:"\f8ff"}.glyphicon-erase:before{content:"\e221"}.glyphicon-hourglass:before{content:"\231b"}.glyphicon-lamp:before{content:"\e223"}.glyphicon-duplicate:before{content:"\e224"}.glyphicon-piggy-bank:before{content:"\e225"}.glyphicon-scissors:before{content:"\e226"}.glyphicon-bitcoin:before{content:"\e227"}.glyphicon-btc:before{content:"\e227"}.glyphicon-xbt:before{content:"\e227"}.glyphicon-yen:before{content:"\00a5"}.glyphicon-jpy:before{content:"\00a5"}.glyphicon-ruble:before{content:"\20bd"}.glyphicon-rub:before{content:"\20bd"}.glyphicon-scale:before{content:"\e230"}.glyphicon-ice-lolly:before{content:"\e231"}.glyphicon-ice-lolly-tasted:before{content:"\e232"}.glyphicon-education:before{content:"\e233"}.glyphicon-option-horizontal:before{content:"\e234"}.glyphicon-option-vertical:before{content:"\e235"}.glyphicon-menu-hamburger:before{content:"\e236"}.glyphicon-modal-window:before{content:"\e237"}.glyphicon-oil:before{content:"\e238"}.glyphicon-grain:before{content:"\e239"}.glyphicon-sunglasses:before{content:"\e240"}.glyphicon-text-size:before{content:"\e241"}.glyphicon-text-color:before{content:"\e242"}.glyphicon-text-background:before{content:"\e243"}.glyphicon-object-align-top:before{content:"\e244"}.glyphicon-object-align-bottom:before{content:"\e245"}.glyphicon-object-align-horizontal:before{content:"\e246"}.glyphicon-object-align-left:before{content:"\e247"}.glyphicon-object-align-vertical:before{content:"\e248"}.glyphicon-object-align-right:before{content:"\e249"}.glyphicon-triangle-right:before{content:"\e250"}.glyphicon-triangle-left:before{content:"\e251"}.glyphicon-triangle-bottom:before{content:"\e252"}.glyphicon-triangle-top:before{content:"\e253"}.glyphicon-console:before{content:"\e254"}.glyphicon-superscript:before{content:"\e255"}.glyphicon-subscript:before{content:"\e256"}.glyphicon-menu-left:before{content:"\e257"}.glyphicon-menu-right:before{content:"\e258"}.glyphicon-menu-down:before{content:"\e259"}.glyphicon-menu-up:before{content:"\e260"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:after,:before{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}button,input,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#337ab7;text-decoration:none}a:focus,a:hover{color:#23527c;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.carousel-inner>.item>a>img,.carousel-inner>.item>img,.img-responsive,.thumbnail a>img,.thumbnail>img{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s 
ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}[role=button]{cursor:pointer}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-weight:400;line-height:1;color:#777}.h1,.h2,.h3,h1,h2,h3{margin-top:20px;margin-bottom:10px}.h1 .small,.h1 small,.h2 .small,.h2 small,.h3 .small,.h3 small,h1 .small,h1 small,h2 .small,h2 small,h3 .small,h3 small{font-size:65%}.h4,.h5,.h6,h4,h5,h6{margin-top:10px;margin-bottom:10px}.h4 .small,.h4 small,.h5 .small,.h5 small,.h6 .small,.h6 small,h4 .small,h4 small,h5 .small,h5 small,h6 .small,h6 small{font-size:75%}.h1,h1{font-size:36px}.h2,h2{font-size:30px}.h3,h3{font-size:24px}.h4,h4{font-size:18px}.h5,h5{font-size:14px}.h6,h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}.small,small{font-size:85%}.mark,mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#337ab7}a.text-primary:focus,a.text-primary:hover{color:#286090}.text-success{color:#3c763d}a.text-success:focus,a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:focus,a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:focus,a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:focus,a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#337ab7}a.bg-primary:focus,a.bg-primary:hover{background-color:#286090}.bg-success{background-color:#dff0d8}a.bg-success:focus,a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:focus,a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:focus,a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:focus,a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ol,ul{margin-top:0;margin-bottom:10px}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dd,dt{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[data-original-title],abbr[title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid 
#eee}blockquote ol:last-child,blockquote p:last-child,blockquote ul:last-child{margin-bottom:0}blockquote .small,blockquote footer,blockquote small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote .small:before,blockquote footer:before,blockquote small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse .small:before,.blockquote-reverse footer:before,.blockquote-reverse small:before,blockquote.pull-right .small:before,blockquote.pull-right footer:before,blockquote.pull-right small:before{content:''}.blockquote-reverse .small:after,.blockquote-reverse footer:after,.blockquote-reverse small:after,blockquote.pull-right .small:after,blockquote.pull-right footer:after,blockquote.pull-right small:after{content:'\00A0 \2014'}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;font-weight:700;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-10,.col-xs-11,.col-xs-12,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}caption{padding-top:8px;padding-bottom:8px;color:#777;text-align:left}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>tbody>tr>td,.table>tbody>tr>th,.table>tfoot>tr>td,.table>tfoot>tr>th,.table>thead>tr>td,.table>thead>tr>th{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>td,.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>td,.table>thead:first-child>tr:first-child>th{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>tbody>tr>td,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>td,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>thead>tr>th{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>tbody>tr>td,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>td,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border:1px solid #ddd}.table-bordered>thead>tr>td,.table-bordered>thead>tr>th{border-bottom-width:2px}.table-striped>tbody>tr:nth-of-type(odd){background-color:#f9f9f9}.table-hover>tbody>tr:hover{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table 
th[class*=col-]{position:static;display:table-cell;float:none}.table>tbody>tr.active>td,.table>tbody>tr.active>th,.table>tbody>tr>td.active,.table>tbody>tr>th.active,.table>tfoot>tr.active>td,.table>tfoot>tr.active>th,.table>tfoot>tr>td.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>thead>tr.active>th,.table>thead>tr>td.active,.table>thead>tr>th.active{background-color:#f5f5f5}.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover{background-color:#e8e8e8}.table>tbody>tr.success>td,.table>tbody>tr.success>th,.table>tbody>tr>td.success,.table>tbody>tr>th.success,.table>tfoot>tr.success>td,.table>tfoot>tr.success>th,.table>tfoot>tr>td.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>thead>tr.success>th,.table>thead>tr>td.success,.table>thead>tr>th.success{background-color:#dff0d8}.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover{background-color:#d0e9c6}.table>tbody>tr.info>td,.table>tbody>tr.info>th,.table>tbody>tr>td.info,.table>tbody>tr>th.info,.table>tfoot>tr.info>td,.table>tfoot>tr.info>th,.table>tfoot>tr>td.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>thead>tr.info>th,.table>thead>tr>td.info,.table>thead>tr>th.info{background-color:#d9edf7}.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover{background-color:#c4e3f3}.table>tbody>tr.warning>td,.table>tbody>tr.warning>th,.table>tbody>tr>td.warning,.table>tbody>tr>th.warning,.table>tfoot>tr.warning>td,.table>tfoot>tr.warning>th,.table>tfoot>tr>td.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>thead>tr.warning>th,.table>thead>tr>td.warning,.table>thead>tr>th.warning{background-color:#fcf8e3}.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover{background-color:#faf2cc}.table>tbody>tr.danger>td,.table>tbody>tr.danger>th,.table>tbody>tr>td.danger,.table>tbody>tr>th.danger,.table>tfoot>tr.danger>td,.table>tfoot>tr.danger>th,.table>tfoot>tr>td.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>thead>tr.danger>th,.table>thead>tr>td.danger,.table>thead>tr>th.danger{background-color:#f2dede}.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover{background-color:#ebcccc}.table-responsive{min-height:.01%;overflow-x:auto}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>td,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>thead>tr>th{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=checkbox],input[type=radio]{margin:4px 0 0;margin-top:1px\9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=checkbox]:focus,input[type=radio]:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control::-ms-expand{background-color:transparent;border:0}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{background-color:#eee;opacity:1}.form-control[disabled],fieldset[disabled] .form-control{cursor:not-allowed}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}@media screen and (-webkit-min-device-pixel-ratio:0){input[type=date].form-control,input[type=time].form-control,input[type=datetime-local].form-control,input[type=month].form-control{line-height:34px}.input-group-sm 
input[type=date],.input-group-sm input[type=time],.input-group-sm input[type=datetime-local],.input-group-sm input[type=month],input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}.input-group-lg input[type=date],.input-group-lg input[type=time],.input-group-lg input[type=datetime-local],.input-group-lg input[type=month],input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}}.form-group{margin-bottom:15px}.checkbox,.radio{position:relative;display:block;margin-top:10px;margin-bottom:10px}.checkbox label,.radio label{min-height:20px;padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox],.radio input[type=radio],.radio-inline input[type=radio]{position:absolute;margin-top:4px\9;margin-left:-20px}.checkbox+.checkbox,.radio+.radio{margin-top:-5px}.checkbox-inline,.radio-inline{position:relative;display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.checkbox-inline+.checkbox-inline,.radio-inline+.radio-inline{margin-top:0;margin-left:10px}fieldset[disabled] input[type=checkbox],fieldset[disabled] input[type=radio],input[type=checkbox].disabled,input[type=checkbox][disabled],input[type=radio].disabled,input[type=radio][disabled]{cursor:not-allowed}.checkbox-inline.disabled,.radio-inline.disabled,fieldset[disabled] .checkbox-inline,fieldset[disabled] .radio-inline{cursor:not-allowed}.checkbox.disabled label,.radio.disabled label,fieldset[disabled] .checkbox label,fieldset[disabled] .radio label{cursor:not-allowed}.form-control-static{min-height:34px;padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}select[multiple].input-sm,textarea.input-sm{height:auto}.form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.form-group-sm select.form-control{height:30px;line-height:30px}.form-group-sm select[multiple].form-control,.form-group-sm textarea.form-control{height:auto}.form-group-sm .form-control-static{height:30px;min-height:32px;padding:6px 10px;font-size:12px;line-height:1.5}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-lg{height:46px;line-height:46px}select[multiple].input-lg,textarea.input-lg{height:auto}.form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.form-group-lg select.form-control{height:46px;line-height:46px}.form-group-lg select[multiple].form-control,.form-group-lg textarea.form-control{height:auto}.form-group-lg .form-control-static{height:46px;min-height:38px;padding:11px 16px;font-size:18px;line-height:1.3333333}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:0;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center;pointer-events:none}.form-group-lg .form-control+.form-control-feedback,.input-group-lg+.form-control-feedback,.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.form-group-sm 
.form-control+.form-control-feedback,.input-group-sm+.form-control-feedback,.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .checkbox,.has-success .checkbox-inline,.has-success .control-label,.has-success .help-block,.has-success .radio,.has-success .radio-inline,.has-success.checkbox label,.has-success.checkbox-inline label,.has-success.radio label,.has-success.radio-inline label{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .checkbox,.has-warning .checkbox-inline,.has-warning .control-label,.has-warning .help-block,.has-warning .radio,.has-warning .radio-inline,.has-warning.checkbox label,.has-warning.checkbox-inline label,.has-warning.radio label,.has-warning.radio-inline label{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .checkbox,.has-error .checkbox-inline,.has-error .control-label,.has-error .help-block,.has-error .radio,.has-error .radio-inline,.has-error.checkbox label,.has-error.checkbox-inline label,.has-error.radio label,.has-error.radio-inline label{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label~.form-control-feedback{top:25px}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-static{display:inline-block}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .form-control,.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .checkbox,.form-inline .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .checkbox label,.form-inline .radio label{padding-left:0}.form-inline .checkbox input[type=checkbox],.form-inline .radio input[type=radio]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .checkbox,.form-horizontal 
.checkbox-inline,.form-horizontal .radio,.form-horizontal .radio-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .checkbox,.form-horizontal .radio{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:11px;font-size:18px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px;font-size:12px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;-ms-touch-action:manipulation;touch-action:manipulation;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn.active.focus,.btn.active:focus,.btn.focus,.btn:active.focus,.btn:active:focus,.btn:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.focus,.btn:focus,.btn:hover{color:#333;text-decoration:none}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}a.btn.disabled,fieldset[disabled] a.btn{pointer-events:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default.focus,.btn-default:focus{color:#333;background-color:#e6e6e6;border-color:#8c8c8c}.btn-default:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default.active.focus,.btn-default.active:focus,.btn-default.active:hover,.btn-default:active.focus,.btn-default:active:focus,.btn-default:active:hover,.open>.dropdown-toggle.btn-default.focus,.open>.dropdown-toggle.btn-default:focus,.open>.dropdown-toggle.btn-default:hover{color:#333;background-color:#d4d4d4;border-color:#8c8c8c}.btn-default.active,.btn-default:active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled.focus,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled].focus,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#fff;border-color:#ccc}.btn-default 
.badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#337ab7;border-color:#2e6da4}.btn-primary.focus,.btn-primary:focus{color:#fff;background-color:#286090;border-color:#122b40}.btn-primary:hover{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#286090;border-color:#204d74}.btn-primary.active.focus,.btn-primary.active:focus,.btn-primary.active:hover,.btn-primary:active.focus,.btn-primary:active:focus,.btn-primary:active:hover,.open>.dropdown-toggle.btn-primary.focus,.open>.dropdown-toggle.btn-primary:focus,.open>.dropdown-toggle.btn-primary:hover{color:#fff;background-color:#204d74;border-color:#122b40}.btn-primary.active,.btn-primary:active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled.focus,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled].focus,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#337ab7;border-color:#2e6da4}.btn-primary .badge{color:#337ab7;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success.focus,.btn-success:focus{color:#fff;background-color:#449d44;border-color:#255625}.btn-success:hover{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success.active.focus,.btn-success.active:focus,.btn-success.active:hover,.btn-success:active.focus,.btn-success:active:focus,.btn-success:active:hover,.open>.dropdown-toggle.btn-success.focus,.open>.dropdown-toggle.btn-success:focus,.open>.dropdown-toggle.btn-success:hover{color:#fff;background-color:#398439;border-color:#255625}.btn-success.active,.btn-success:active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled.focus,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled].focus,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info.focus,.btn-info:focus{color:#fff;background-color:#31b0d5;border-color:#1b6d85}.btn-info:hover{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info.active.focus,.btn-info.active:focus,.btn-info.active:hover,.btn-info:active.focus,.btn-info:active:focus,.btn-info:active:hover,.open>.dropdown-toggle.btn-info.focus,.open>.dropdown-toggle.btn-info:focus,.open>.dropdown-toggle.btn-info:hover{color:#fff;background-color:#269abc;border-color:#1b6d85}.btn-info.active,.btn-info:active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled.focus,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled].focus,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#5bc0de;border-color:#46b8da}.btn-info 
.badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning.focus,.btn-warning:focus{color:#fff;background-color:#ec971f;border-color:#985f0d}.btn-warning:hover{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning.active.focus,.btn-warning.active:focus,.btn-warning.active:hover,.btn-warning:active.focus,.btn-warning:active:focus,.btn-warning:active:hover,.open>.dropdown-toggle.btn-warning.focus,.open>.dropdown-toggle.btn-warning:focus,.open>.dropdown-toggle.btn-warning:hover{color:#fff;background-color:#d58512;border-color:#985f0d}.btn-warning.active,.btn-warning:active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled.focus,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled].focus,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger.focus,.btn-danger:focus{color:#fff;background-color:#c9302c;border-color:#761c19}.btn-danger:hover{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger.active.focus,.btn-danger.active:focus,.btn-danger.active:hover,.btn-danger:active.focus,.btn-danger:active:focus,.btn-danger:active:hover,.open>.dropdown-toggle.btn-danger.focus,.open>.dropdown-toggle.btn-danger:focus,.open>.dropdown-toggle.btn-danger:hover{color:#fff;background-color:#ac2925;border-color:#761c19}.btn-danger.active,.btn-danger:active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled.focus,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled].focus,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#337ab7;border-radius:0}.btn-link,.btn-link.active,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:active,.btn-link:focus,.btn-link:hover{border-color:transparent}.btn-link:focus,.btn-link:hover{color:#23527c;text-decoration:underline;background-color:transparent}.btn-link[disabled]:focus,.btn-link[disabled]:hover,fieldset[disabled] .btn-link:focus,fieldset[disabled] .btn-link:hover{color:#777;text-decoration:none}.btn-group-lg>.btn,.btn-lg{padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}.btn-group-sm>.btn,.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-xs>.btn,.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s 
linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition-timing-function:ease;-o-transition-timing-function:ease;transition-timing-function:ease;-webkit-transition-duration:.35s;-o-transition-duration:.35s;transition-duration:.35s;-webkit-transition-property:height,visibility;-o-transition-property:height,visibility;transition-property:height,visibility}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px dashed;border-top:4px solid\9;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown,.dropup{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{color:#fff;text-decoration:none;background-color:#337ab7;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{color:#777}.dropdown-menu>.disabled>a:focus,.dropdown-menu>.disabled>a:hover{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px dashed;border-bottom:4px solid\9}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:2px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;float:left}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:2}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn,.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn input[type=checkbox],[data-toggle=buttons]>.btn input[type=radio],[data-toggle=buttons]>.btn-group>.btn input[type=checkbox],[data-toggle=buttons]>.btn-group>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group 
.form-control:focus{z-index:3}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 16px;font-size:18px;line-height:1.3333333;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn,textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn,textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn{height:auto}.input-group .form-control,.input-group-addon,.input-group-btn{display:table-cell}.input-group .form-control:not(:first-child):not(:last-child),.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=checkbox],.input-group-addon input[type=radio]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn-group:not(:last-child)>.btn,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:first-child>.btn-group:not(:first-child)>.btn,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:active,.input-group-btn>.btn:focus,.input-group-btn>.btn:hover{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{z-index:2;margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 
15px}.nav>li>a:focus,.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:focus,.nav>li.disabled>a:hover{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav .open>a:focus,.nav .open>a:hover{background-color:#eee;border-color:#337ab7}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:focus,.nav-tabs>li.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:focus,.nav-tabs.nav-justified>.active>a:hover{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:focus,.nav-pills>li.active>a:hover{color:#fff;background-color:#337ab7}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:focus,.nav-tabs-justified>.active>a:hover{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media 
(min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:340px}@media (max-device-width:480px) and (orientation:landscape){.navbar-fixed-bottom .navbar-collapse,.navbar-fixed-top .navbar-collapse{max-height:200px}}.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container-fluid>.navbar-collapse,.container-fluid>.navbar-header,.container>.navbar-collapse,.container>.navbar-header{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-bottom,.navbar-fixed-top{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-bottom,.navbar-fixed-top{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-brand>img{display:block}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu .dropdown-header,.navbar-nav .open .dropdown-menu>li>a{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:focus,.navbar-nav .open .dropdown-menu>li>a:hover{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .form-control-static{display:inline-block}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .form-control,.navbar-form .input-group .input-group-addon,.navbar-form .input-group 
.input-group-btn{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .checkbox,.navbar-form .radio{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .checkbox label,.navbar-form .radio label{padding-left:0}.navbar-form .checkbox input[type=checkbox],.navbar-form .radio input[type=radio]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}.navbar-form .form-group:last-child{margin-bottom:0}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{margin-bottom:0;border-top-left-radius:4px;border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important;margin-right:-15px}.navbar-right~.navbar-right{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:focus,.navbar-default .navbar-brand:hover{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:focus,.navbar-default .navbar-nav>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:focus,.navbar-default .navbar-nav>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:focus,.navbar-default .navbar-nav>.disabled>a:hover{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:focus,.navbar-default .navbar-toggle:hover{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:focus,.navbar-default .navbar-nav>.open>a:hover{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default 
.btn-link:focus,.navbar-default .btn-link:hover{color:#333}.navbar-default .btn-link[disabled]:focus,.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:focus,fieldset[disabled] .navbar-default .btn-link:hover{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#9d9d9d}.navbar-inverse .navbar-brand:focus,.navbar-inverse .navbar-brand:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav>li>a:focus,.navbar-inverse .navbar-nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:focus,.navbar-inverse .navbar-nav>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:focus,.navbar-inverse .navbar-nav>.disabled>a:hover{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:focus,.navbar-inverse .navbar-toggle:hover{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:focus,.navbar-inverse .navbar-nav>.open>a:hover{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#9d9d9d}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#9d9d9d}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#9d9d9d}.navbar-inverse .btn-link:focus,.navbar-inverse .btn-link:hover{color:#fff}.navbar-inverse .btn-link[disabled]:focus,.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:focus,fieldset[disabled] .navbar-inverse .btn-link:hover{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#337ab7;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:focus,.pagination>li>a:hover,.pagination>li>span:focus,.pagination>li>span:hover{z-index:2;color:#23527c;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>a:focus,.pagination>.active>a:hover,.pagination>.active>span,.pagination>.active>span:focus,.pagination>.active>span:hover{z-index:3;color:#fff;cursor:default;background-color:#337ab7;border-color:#337ab7}.pagination>.disabled>a,.pagination>.disabled>a:focus,.pagination>.disabled>a:hover,.pagination>.disabled>span,.pagination>.disabled>span:focus,.pagination>.disabled>span:hover{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px;line-height:1.3333333}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px;line-height:1.5}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:focus,.pager li>a:hover{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:focus,.pager .disabled>a:hover,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:focus,a.label:hover{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:focus,.label-default[href]:hover{background-color:#5e5e5e}.label-primary{background-color:#337ab7}.label-primary[href]:focus,.label-primary[href]:hover{background-color:#286090}.label-success{background-color:#5cb85c}.label-success[href]:focus,.label-success[href]:hover{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:focus,.label-info[href]:hover{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:focus,.label-warning[href]:hover{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:focus,.label-danger[href]:hover{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:middle;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-group-xs>.btn .badge,.btn-xs .badge{top:0;padding:1px 
5px}a.badge:focus,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#337ab7;background-color:#fff}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding-top:30px;padding-bottom:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron .h1,.jumbotron h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron,.container-fluid .jumbotron{padding-right:15px;padding-left:15px;border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron,.container-fluid .jumbotron{padding-right:60px;padding-left:60px}.jumbotron .h1,.jumbotron h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:border .2s ease-in-out;-o-transition:border .2s ease-in-out;transition:border .2s ease-in-out}.thumbnail a>img,.thumbnail>img{margin-right:auto;margin-left:auto}a.thumbnail.active,a.thumbnail:focus,a.thumbnail:hover{border-color:#337ab7}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#337ab7;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-bar-striped,.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress-bar.active,.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent)}.media{margin-top:15px}.media:first-child{margin-top:0}.media,.media-body{overflow:hidden;zoom:1}.media-body{width:10000px}.media-object{display:block}.media-object.img-thumbnail{max-width:none}.media-right,.media>.pull-right{padding-left:10px}.media-left,.media>.pull-left{padding-right:10px}.media-body,.media-left,.media-right{display:table-cell;vertical-align:top}.media-middle{vertical-align:middle}.media-bottom{vertical-align:bottom}.media-heading{margin-top:0;margin-bottom:5px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}a.list-group-item,button.list-group-item{color:#555}a.list-group-item .list-group-item-heading,button.list-group-item .list-group-item-heading{color:#333}a.list-group-item:focus,a.list-group-item:hover,button.list-group-item:focus,button.list-group-item:hover{color:#555;text-decoration:none;background-color:#f5f5f5}button.list-group-item{width:100%;text-align:left}.list-group-item.disabled,.list-group-item.disabled:focus,.list-group-item.disabled:hover{color:#777;cursor:not-allowed;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{z-index:2;color:#fff;background-color:#337ab7;border-color:#337ab7}.list-group-item.active .list-group-item-heading,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:focus .list-group-item-text,.list-group-item.active:hover .list-group-item-text{color:#c7ddef}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success,button.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading,button.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:focus,a.list-group-item-success:hover,button.list-group-item-success:focus,button.list-group-item-success:hover{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:focus,a.list-group-item-success.active:hover,button.list-group-item-success.active,button.list-group-item-success.active:focus,button.list-group-item-success.active:hover{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info,button.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading,button.list-group-item-info 
.list-group-item-heading{color:inherit}a.list-group-item-info:focus,a.list-group-item-info:hover,button.list-group-item-info:focus,button.list-group-item-info:hover{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:focus,a.list-group-item-info.active:hover,button.list-group-item-info.active,button.list-group-item-info.active:focus,button.list-group-item-info.active:hover{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning,button.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading,button.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:focus,a.list-group-item-warning:hover,button.list-group-item-warning:focus,button.list-group-item-warning:hover{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:focus,a.list-group-item-warning.active:hover,button.list-group-item-warning.active,button.list-group-item-warning.active:focus,button.list-group-item-warning.active:hover{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger,button.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading,button.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:focus,a.list-group-item-danger:hover,button.list-group-item-danger:focus,button.list-group-item-danger:hover{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:focus,a.list-group-item-danger.active:hover,button.list-group-item-danger.active,button.list-group-item-danger.active:focus,button.list-group-item-danger.active:hover{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>.small,.panel-title>.small>a,.panel-title>a,.panel-title>small,.panel-title>small>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group,.panel>.panel-collapse>.list-group{margin-bottom:0}.panel>.list-group .list-group-item,.panel>.panel-collapse>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child,.panel>.panel-collapse>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child,.panel>.panel-collapse>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.panel-heading+.panel-collapse>.list-group .list-group-item:first-child{border-top-left-radius:0;border-top-right-radius:0}.panel-heading+.list-group 
.list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.panel-collapse>.table,.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.panel-collapse>.table caption,.panel>.table caption,.panel>.table-responsive>.table caption{padding-right:15px;padding-left:15px}.panel>.table-responsive:first-child>.table:first-child,.panel>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child,.panel>.table:first-child>thead:first-child>tr:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table-responsive:last-child>.table:last-child,.panel>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child 
th:first-child{border-bottom-left-radius:3px}.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive,.panel>.table+.panel-body,.panel>.table-responsive+.panel-body{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child td,.panel>.table>tbody:first-child>tr:first-child th{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child{border-left:0}.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child{border-right:0}.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group 
.panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.list-group,.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#337ab7}.panel-primary>.panel-heading{color:#fff;background-color:#337ab7;border-color:#337ab7}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#337ab7}.panel-primary>.panel-heading .badge{color:#337ab7;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#337ab7}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 
#fff;filter:alpha(opacity=20);opacity:.2}.close:focus,.close:hover{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);-o-transform:translate(0,-25%);transform:translate(0,-25%)}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);-o-transform:translate(0,0);transform:translate(0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;filter:alpha(opacity=0);opacity:0;line-break:auto}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{right:5px;bottom:0;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;left:5px;margin-bottom:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 
5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;right:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;left:5px;margin-top:-5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;font-style:normal;font-weight:400;line-height:1.42857143;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;word-wrap:normal;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);line-break:auto}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>a>img,.carousel-inner>.item>img{line-height:1}@media all and (transform-3d),(-webkit-transform-3d){.carousel-inner>.item{-webkit-transition:-webkit-transform .6s ease-in-out;-o-transition:-o-transform .6s ease-in-out;transition:transform .6s 
ease-in-out;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-inner>.item.active.right,.carousel-inner>.item.next{left:0;-webkit-transform:translate3d(100%,0,0);transform:translate3d(100%,0,0)}.carousel-inner>.item.active.left,.carousel-inner>.item.prev{left:0;-webkit-transform:translate3d(-100%,0,0);transform:translate3d(-100%,0,0)}.carousel-inner>.item.active,.carousel-inner>.item.next.left,.carousel-inner>.item.prev.right{left:0;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);background-color:rgba(0,0,0,0);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:focus,.carousel-control:hover{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{position:absolute;top:50%;z-index:5;display:inline-block;margin-top:-10px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{left:50%;margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{right:50%;margin-right:-10px}.carousel-control .icon-next,.carousel-control .icon-prev{width:20px;height:20px;font-family:serif;line-height:1}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000\9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators 
.active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next,.carousel-control .icon-prev{width:30px;height:30px;margin-top:-10px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-10px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-10px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.btn-group-vertical>.btn-group:after,.btn-group-vertical>.btn-group:before,.btn-toolbar:after,.btn-toolbar:before,.clearfix:after,.clearfix:before,.container-fluid:after,.container-fluid:before,.container:after,.container:before,.dl-horizontal dd:after,.dl-horizontal dd:before,.form-horizontal .form-group:after,.form-horizontal .form-group:before,.modal-footer:after,.modal-footer:before,.modal-header:after,.modal-header:before,.nav:after,.nav:before,.navbar-collapse:after,.navbar-collapse:before,.navbar-header:after,.navbar-header:before,.navbar:after,.navbar:before,.pager:after,.pager:before,.panel-body:after,.panel-body:before,.row:after,.row:before{display:table;content:" "}.btn-group-vertical>.btn-group:after,.btn-toolbar:after,.clearfix:after,.container-fluid:after,.container:after,.dl-horizontal dd:after,.form-horizontal .form-group:after,.modal-footer:after,.modal-header:after,.nav:after,.navbar-collapse:after,.navbar-header:after,.navbar:after,.pager:after,.panel-body:after,.row:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-lg,.visible-md,.visible-sm,.visible-xs{display:none!important}.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table!important}tr.visible-xs{display:table-row!important}td.visible-xs,th.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table!important}tr.visible-sm{display:table-row!important}td.visible-sm,th.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and 
(max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table!important}tr.visible-md{display:table-row!important}td.visible-md,th.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table!important}tr.visible-lg{display:table-row!important}td.visible-lg,th.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table!important}tr.visible-print{display:table-row!important}td.visible-print,th.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}}
+/*# sourceMappingURL=bootstrap.min.css.map */
\ No newline at end of file
diff --git a/asset/static/css/calendar/fullcalendar.css b/asset/static/css/calendar/fullcalendar.css
new file mode 100755
index 0000000..e036061
--- /dev/null
+++ b/asset/static/css/calendar/fullcalendar.css
@@ -0,0 +1,1116 @@
+/*!
+ * FullCalendar v2.6.1 Stylesheet
+ * Docs & License: http://fullcalendar.io/
+ * (c) 2015 Adam Shaw
+ */
+
+
+.fc {
+ direction: ltr;
+ text-align: left;
+}
+
+.fc-rtl {
+ text-align: right;
+}
+
+body .fc { /* extra precedence to overcome jqui */
+ font-size: 1em;
+}
+
+
+/* Colors
+--------------------------------------------------------------------------------------------------*/
+
+.fc-unthemed th,
+.fc-unthemed td,
+.fc-unthemed thead,
+.fc-unthemed tbody,
+.fc-unthemed .fc-divider,
+.fc-unthemed .fc-row,
+.fc-unthemed .fc-popover {
+ border-color: #ddd;
+}
+
+.fc-unthemed .fc-popover {
+ background-color: #fff;
+}
+
+.fc-unthemed .fc-divider,
+.fc-unthemed .fc-popover .fc-header {
+ background: #eee;
+}
+
+.fc-unthemed .fc-popover .fc-header .fc-close {
+ color: #666;
+}
+
+.fc-unthemed .fc-today {
+ background: #fcf8e3;
+}
+
+.fc-highlight { /* when user is selecting cells */
+ background: #bce8f1;
+ opacity: .3;
+ filter: alpha(opacity=30); /* for IE */
+}
+
+.fc-bgevent { /* default look for background events */
+ background: rgb(143, 223, 130);
+ opacity: .3;
+ filter: alpha(opacity=30); /* for IE */
+}
+
+.fc-nonbusiness { /* default look for non-business-hours areas */
+ /* will inherit .fc-bgevent's styles */
+ background: #d7d7d7;
+}
+
+
+/* Icons (inline elements with styled text that mock arrow icons)
+--------------------------------------------------------------------------------------------------*/
+
+.fc-icon {
+ display: inline-block;
+ width: 1em;
+ height: 1em;
+ line-height: 1em;
+ font-size: 1em;
+ text-align: center;
+ overflow: hidden;
+ font-family: "Courier New", Courier, monospace;
+
+ /* don't allow browser text-selection */
+ -webkit-touch-callout: none;
+ -webkit-user-select: none;
+ -khtml-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ }
+
+/*
+Acceptable font-family overrides for individual icons:
+ "Arial", sans-serif
+ "Times New Roman", serif
+
+NOTE: use percentage font sizes or else old IE chokes
+*/
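+
+/*
+Illustrative sketch only (not part of the library): following the note above, an
+override for a single icon could look like this, using a percentage font size so
+old IE doesn't choke:
+
+.fc-icon-left-single-arrow:after {
+ font-family: "Arial", sans-serif;
+ font-size: 200%;
+}
+*/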
+
+.fc-icon:after {
+ position: relative;
+ margin: 0 -1em; /* ensures character will be centered, regardless of width */
+}
+
+.fc-icon-left-single-arrow:after {
+ content: "\02039";
+ font-weight: bold;
+ font-size: 200%;
+ top: -7%;
+ left: 3%;
+}
+
+.fc-icon-right-single-arrow:after {
+ content: "\0203A";
+ font-weight: bold;
+ font-size: 200%;
+ top: -7%;
+ left: -3%;
+}
+
+.fc-icon-left-double-arrow:after {
+ content: "\000AB";
+ font-size: 160%;
+ top: -7%;
+}
+
+.fc-icon-right-double-arrow:after {
+ content: "\000BB";
+ font-size: 160%;
+ top: -7%;
+}
+
+.fc-icon-left-triangle:after {
+ content: "\25C4";
+ font-size: 125%;
+ top: 3%;
+ left: -2%;
+}
+
+.fc-icon-right-triangle:after {
+ content: "\25BA";
+ font-size: 125%;
+ top: 3%;
+ left: 2%;
+}
+
+.fc-icon-down-triangle:after {
+ content: "\25BC";
+ font-size: 125%;
+ top: 2%;
+}
+
+.fc-icon-x:after {
+ content: "\000D7";
+ font-size: 200%;
+ top: 6%;
+}
+
+
+/* Buttons (styled tags, normalized to work cross-browser)
+--------------------------------------------------------------------------------------------------*/
+
+.fc button {
+ /* force height to include the border and padding */
+ -moz-box-sizing: border-box;
+ -webkit-box-sizing: border-box;
+ box-sizing: border-box;
+
+ /* dimensions */
+ margin: 0;
+ height: 2.1em;
+ padding: 0 .6em;
+
+ /* text & cursor */
+ font-size: 1em; /* normalize */
+ white-space: nowrap;
+ cursor: pointer;
+}
+
+/* Firefox has an annoying inner border */
+.fc button::-moz-focus-inner { margin: 0; padding: 0; }
+
+.fc-state-default { /* non-theme */
+ border: 1px solid;
+}
+
+.fc-state-default.fc-corner-left { /* non-theme */
+ border-top-left-radius: 4px;
+ border-bottom-left-radius: 4px;
+}
+
+.fc-state-default.fc-corner-right { /* non-theme */
+ border-top-right-radius: 4px;
+ border-bottom-right-radius: 4px;
+}
+
+/* icons in buttons */
+
+.fc button .fc-icon { /* non-theme */
+ position: relative;
+ top: -0.05em; /* seems to be a good adjustment across browsers */
+ margin: 0 .2em;
+ vertical-align: middle;
+}
+
+/*
+ button states
+ borrowed from twitter bootstrap (http://twitter.github.com/bootstrap/)
+*/
+
+.fc-state-default {
+ background-color: #f5f5f5;
+ background-image: -o-linear-gradient(top, #ffffff, #e6e6e6);
+ border-color: #e6e6e6 #e6e6e6 #bfbfbf;
+ border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
+}
+
+.fc-state-hover,
+.fc-state-down,
+.fc-state-active,
+.fc-state-disabled {
+ color: #333333;
+ background-color: #e6e6e6;
+}
+
+.fc-state-hover {
+ color: #333333;
+ text-decoration: none;
+ background-position: 0 -15px;
+ -webkit-transition: background-position 0.1s linear;
+ -moz-transition: background-position 0.1s linear;
+ -o-transition: background-position 0.1s linear;
+ transition: background-position 0.1s linear;
+}
+
+.fc-state-down,
+.fc-state-active {
+ background-color: #cccccc;
+ background-image: none;
+ box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.15), 0 1px 2px rgba(0, 0, 0, 0.05);
+}
+
+.fc-state-disabled {
+ cursor: default;
+ background-image: none;
+ opacity: 0.65;
+ filter: alpha(opacity=65);
+ box-shadow: none;
+}
+
+
+/* Buttons Groups
+--------------------------------------------------------------------------------------------------*/
+
+.fc-button-group {
+ display: inline-block;
+}
+
+/*
+every button that is not first in a button group should scootch over one pixel and cover the
+previous button's border...
+*/
+
+.fc .fc-button-group > * { /* extra precedence b/c buttons have margin set to zero */
+ float: left;
+ margin: 0 0 0 -1px;
+}
+
+.fc .fc-button-group > :first-child { /* same */
+ margin-left: 0;
+}
+
+
+/* Popover
+--------------------------------------------------------------------------------------------------*/
+
+.fc-popover {
+ position: absolute;
+ box-shadow: 0 2px 6px rgba(0,0,0,.15);
+}
+
+.fc-popover .fc-header { /* TODO: be more consistent with fc-head/fc-body */
+ padding: 2px 4px;
+}
+
+.fc-popover .fc-header .fc-title {
+ margin: 0 2px;
+}
+
+.fc-popover .fc-header .fc-close {
+ cursor: pointer;
+}
+
+.fc-ltr .fc-popover .fc-header .fc-title,
+.fc-rtl .fc-popover .fc-header .fc-close {
+ float: left;
+}
+
+.fc-rtl .fc-popover .fc-header .fc-title,
+.fc-ltr .fc-popover .fc-header .fc-close {
+ float: right;
+}
+
+/* unthemed */
+
+.fc-unthemed .fc-popover {
+ border-width: 1px;
+ border-style: solid;
+}
+
+.fc-unthemed .fc-popover .fc-header .fc-close {
+ font-size: .9em;
+ margin-top: 2px;
+}
+
+/* jqui themed */
+
+.fc-popover > .ui-widget-header + .ui-widget-content {
+ border-top: 0; /* where they meet, let the header have the border */
+}
+
+
+/* Misc Reusable Components
+--------------------------------------------------------------------------------------------------*/
+
+.fc-divider {
+ border-style: solid;
+ border-width: 1px;
+}
+
+hr.fc-divider {
+ height: 0;
+ margin: 0;
+ padding: 0 0 2px; /* height is unreliable across browsers, so use padding */
+ border-width: 1px 0;
+}
+
+.fc-clear {
+ clear: both;
+}
+
+.fc-bg,
+.fc-bgevent-skeleton,
+.fc-highlight-skeleton,
+.fc-helper-skeleton {
+ /* these elements should always cling to top-left/right corners */
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+}
+
+.fc-bg {
+ bottom: 0; /* stretch bg to bottom edge */
+}
+
+.fc-bg table {
+ height: 100%; /* stretch bg to bottom edge */
+}
+
+
+/* Tables
+--------------------------------------------------------------------------------------------------*/
+
+.fc table {
+ width: 100%;
+ table-layout: fixed;
+ border-collapse: collapse;
+ border-spacing: 0;
+ font-size: 1em; /* normalize cross-browser */
+}
+
+.fc th {
+ text-align: center;
+}
+
+.fc th,
+.fc td {
+ border-style: solid;
+ border-width: 1px;
+ padding: 0;
+ vertical-align: top;
+}
+
+.fc td.fc-today {
+ border-style: double; /* overcome neighboring borders */
+}
+
+
+/* Fake Table Rows
+--------------------------------------------------------------------------------------------------*/
+
+.fc .fc-row { /* extra precedence to overcome themes w/ .ui-widget-content forcing a 1px border */
+ /* no visible border by default. but make available if need be (scrollbar width compensation) */
+ border-style: solid;
+ border-width: 0;
+}
+
+.fc-row table {
+ /* don't put left/right border on anything within a fake row.
+ the outer tbody will worry about this */
+ border-left: 0 hidden transparent;
+ border-right: 0 hidden transparent;
+
+ /* no bottom borders on rows */
+ border-bottom: 0 hidden transparent;
+}
+
+.fc-row:first-child table {
+ border-top: 0 hidden transparent; /* no top border on first row */
+}
+
+
+/* Day Row (used within the header and the DayGrid)
+--------------------------------------------------------------------------------------------------*/
+
+.fc-row {
+ position: relative;
+}
+
+.fc-row .fc-bg {
+ z-index: 1;
+}
+
+/* highlighting cells & background event skeleton */
+
+.fc-row .fc-bgevent-skeleton,
+.fc-row .fc-highlight-skeleton {
+ bottom: 0; /* stretch skeleton to bottom of row */
+}
+
+.fc-row .fc-bgevent-skeleton table,
+.fc-row .fc-highlight-skeleton table {
+ height: 100%; /* stretch skeleton to bottom of row */
+}
+
+.fc-row .fc-highlight-skeleton td,
+.fc-row .fc-bgevent-skeleton td {
+ border-color: transparent;
+}
+
+.fc-row .fc-bgevent-skeleton {
+ z-index: 2;
+
+}
+
+.fc-row .fc-highlight-skeleton {
+ z-index: 3;
+}
+
+/*
+row content (which contains day/week numbers and events) as well as "helper" (which contains
+temporary rendered events).
+*/
+
+.fc-row .fc-content-skeleton {
+ position: relative;
+ z-index: 4;
+ padding-bottom: 2px; /* matches the space above the events */
+}
+
+.fc-row .fc-helper-skeleton {
+ z-index: 5;
+}
+
+.fc-row .fc-content-skeleton td,
+.fc-row .fc-helper-skeleton td {
+ /* see-through to the background below */
+ background: none; /* in case <td>s are globally styled */
+ border-color: transparent;
+
+ /* don't put a border between events and/or the day number */
+ border-bottom: 0;
+}
+
+.fc-row .fc-content-skeleton tbody td, /* cells with events inside (so NOT the day number cell) */
+.fc-row .fc-helper-skeleton tbody td {
+ /* don't put a border between event cells */
+ border-top: 0;
+}
+
+
+/* Scrolling Container
+--------------------------------------------------------------------------------------------------*/
+
+.fc-scroller { /* this class goes on elements for guaranteed vertical scrollbars */
+ overflow-y: scroll;
+ overflow-x: hidden;
+}
+
+.fc-scroller > * { /* we expect an immediate inner element */
+ position: relative; /* re-scope all positions */
+ width: 100%; /* hack to force re-sizing this inner element when scrollbars appear/disappear */
+ overflow: hidden; /* don't let negative margins or absolute positioning create further scroll */
+}
+
+
+/* Global Event Styles
+--------------------------------------------------------------------------------------------------*/
+
+.fc-event {
+ position: relative; /* for resize handle and other inner positioning */
+ display: block; /* make the <a> tag block */
+ font-size: .85em;
+ line-height: 1.3;
+ border-radius: 3px;
+ border: 1px solid #3a87ad; /* default BORDER color */
+ background-color: #3a87ad; /* default BACKGROUND color */
+ font-weight: normal; /* undo jqui's ui-widget-header bold */
+}
+
+/* overpower some of bootstrap's and jqui's styles on <a> tags */
+.fc-event,
+.fc-event:hover,
+.ui-widget .fc-event {
+ color: #fff; /* default TEXT color */
+ text-decoration: none; /* if has an href */
+}
+
+.fc-event[href],
+.fc-event.fc-draggable {
+ cursor: pointer; /* give events with links and draggable events a hand mouse pointer */
+}
+
+.fc-not-allowed, /* causes a "warning" cursor. applied on body */
+.fc-not-allowed .fc-event { /* to override an event's custom cursor */
+ cursor: not-allowed;
+}
+
+.fc-event .fc-bg { /* the generic .fc-bg already does position */
+ z-index: 1;
+ background: #fff;
+ opacity: .25;
+ filter: alpha(opacity=25); /* for IE */
+}
+
+.fc-event .fc-content {
+ position: relative;
+ z-index: 2;
+}
+
+.fc-event .fc-resizer {
+ position: absolute;
+ z-index: 3;
+}
+
+
+/* Horizontal Events
+--------------------------------------------------------------------------------------------------*/
+
+/* events that are continuing to/from another week. kill rounded corners and butt up against edge */
+
+.fc-ltr .fc-h-event.fc-not-start,
+.fc-rtl .fc-h-event.fc-not-end {
+ margin-left: 0;
+ border-left-width: 0;
+ padding-left: 1px; /* replace the border with padding */
+ border-top-left-radius: 0;
+ border-bottom-left-radius: 0;
+}
+
+.fc-ltr .fc-h-event.fc-not-end,
+.fc-rtl .fc-h-event.fc-not-start {
+ margin-right: 0;
+ border-right-width: 0;
+ padding-right: 1px; /* replace the border with padding */
+ border-top-right-radius: 0;
+ border-bottom-right-radius: 0;
+}
+
+/* resizer */
+
+.fc-h-event .fc-resizer { /* positioned to overcome the event's borders */
+ top: -1px;
+ bottom: -1px;
+ left: -1px;
+ right: -1px;
+ width: 5px;
+}
+
+/* left resizer */
+.fc-ltr .fc-h-event .fc-start-resizer,
+.fc-ltr .fc-h-event .fc-start-resizer:before,
+.fc-ltr .fc-h-event .fc-start-resizer:after,
+.fc-rtl .fc-h-event .fc-end-resizer,
+.fc-rtl .fc-h-event .fc-end-resizer:before,
+.fc-rtl .fc-h-event .fc-end-resizer:after {
+ right: auto; /* ignore the right and only use the left */
+ cursor: w-resize;
+}
+
+/* right resizer */
+.fc-ltr .fc-h-event .fc-end-resizer,
+.fc-ltr .fc-h-event .fc-end-resizer:before,
+.fc-ltr .fc-h-event .fc-end-resizer:after,
+.fc-rtl .fc-h-event .fc-start-resizer,
+.fc-rtl .fc-h-event .fc-start-resizer:before,
+.fc-rtl .fc-h-event .fc-start-resizer:after {
+ left: auto; /* ignore the left and only use the right */
+ cursor: e-resize;
+}
+
+
+/* DayGrid events
+----------------------------------------------------------------------------------------------------
+We use the full "fc-day-grid-event" class instead of using descendants because the event won't
+be a descendant of the grid when it is being dragged.
+*/
+
+.fc-day-grid-event {
+ margin: 1px 2px 0; /* spacing between events and edges */
+ padding: 0 1px;
+}
+
+
+.fc-day-grid-event .fc-content { /* force events to be one-line tall */
+ white-space: nowrap;
+ overflow: hidden;
+}
+
+.fc-day-grid-event .fc-time {
+ font-weight: bold;
+}
+
+.fc-day-grid-event .fc-resizer { /* enlarge the default hit area */
+ left: -3px;
+ right: -3px;
+ width: 7px;
+}
+
+
+/* Event Limiting
+--------------------------------------------------------------------------------------------------*/
+
+/* "more" link that represents hidden events */
+
+a.fc-more {
+ margin: 1px 3px;
+ font-size: .85em;
+ cursor: pointer;
+ text-decoration: none;
+}
+
+a.fc-more:hover {
+ text-decoration: underline;
+}
+
+.fc-limited { /* rows and cells that are hidden because of a "more" link */
+ display: none;
+}
+
+/* popover that appears when "more" link is clicked */
+
+.fc-day-grid .fc-row {
+ z-index: 1; /* make the "more" popover one higher than this */
+}
+
+.fc-more-popover {
+ z-index: 2;
+ width: 220px;
+}
+
+.fc-more-popover .fc-event-container {
+ padding: 10px;
+}
+
+
+/* Now Indicator
+--------------------------------------------------------------------------------------------------*/
+
+.fc-now-indicator {
+ position: absolute;
+ border: 0 solid red;
+}
+
+/* Toolbar
+--------------------------------------------------------------------------------------------------*/
+
+.fc-toolbar {
+ text-align: center;
+ margin-bottom: 1em;
+}
+
+.fc-toolbar .fc-left {
+ float: left;
+}
+
+.fc-toolbar .fc-right {
+ float: right;
+}
+
+.fc-toolbar .fc-center {
+ display: inline-block;
+}
+
+/* the things within each left/right/center section */
+.fc .fc-toolbar > * > * { /* extra precedence to override button border margins */
+ float: left;
+ margin-left: .75em;
+}
+
+/* the first thing within each left/center/right section */
+.fc .fc-toolbar > * > :first-child { /* extra precedence to override button border margins */
+ margin-left: 0;
+}
+
+/* title text */
+
+.fc-toolbar h2 {
+ margin: 0;
+}
+
+/* button layering (for border precedence) */
+
+.fc-toolbar button {
+ position: relative;
+}
+
+.fc-toolbar .fc-state-hover,
+.fc-toolbar .ui-state-hover {
+ z-index: 2;
+}
+
+.fc-toolbar .fc-state-down {
+ z-index: 3;
+}
+
+.fc-toolbar .fc-state-active,
+.fc-toolbar .ui-state-active {
+ z-index: 4;
+}
+
+.fc-toolbar button:focus {
+ z-index: 5;
+}
+
+
+/* View Structure
+--------------------------------------------------------------------------------------------------*/
+
+/* undo twitter bootstrap's box-sizing rules. normalizes positioning techniques */
+/* don't do this for the toolbar because we'll want bootstrap to style those buttons at some point */
+.fc-view-container *,
+.fc-view-container *:before,
+.fc-view-container *:after {
+ -webkit-box-sizing: content-box;
+ -moz-box-sizing: content-box;
+ box-sizing: content-box;
+}
+
+.fc-view, /* scope positioning and z-index's for everything within the view */
+.fc-view > table { /* so dragged elements can be above the view's main element */
+ position: relative;
+ z-index: 1;
+}
+
+/* BasicView
+--------------------------------------------------------------------------------------------------*/
+
+/* day row structure */
+
+.fc-basicWeek-view .fc-content-skeleton,
+.fc-basicDay-view .fc-content-skeleton {
+ /* we are sure there are no day numbers in these views, so... */
+ padding-top: 1px; /* add a pixel to make sure there are 2px padding above events */
+ padding-bottom: 1em; /* ensure a space at bottom of cell for user selecting/clicking */
+}
+
+.fc-basic-view .fc-body .fc-row {
+ min-height: 4em; /* ensure that all rows are at least this tall */
+}
+
+/* a "rigid" row will take up a constant amount of height because content-skeleton is absolute */
+
+.fc-row.fc-rigid {
+ overflow: hidden;
+}
+
+.fc-row.fc-rigid .fc-content-skeleton {
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+}
+
+/* week and day number styling */
+
+.fc-basic-view .fc-week-number,
+.fc-basic-view .fc-day-number {
+ padding: 0 2px;
+}
+
+.fc-basic-view td.fc-week-number span,
+.fc-basic-view td.fc-day-number {
+ padding-top: 2px;
+ padding-bottom: 2px;
+}
+
+.fc-basic-view .fc-week-number {
+ text-align: center;
+}
+
+.fc-basic-view .fc-week-number span {
+ /* work around the way we do column resizing and ensure a minimum width */
+ display: inline-block;
+ min-width: 1.25em;
+}
+
+.fc-ltr .fc-basic-view .fc-day-number {
+ text-align: right;
+}
+
+.fc-rtl .fc-basic-view .fc-day-number {
+ text-align: left;
+}
+
+.fc-day-number.fc-other-month {
+ opacity: 0.3;
+ filter: alpha(opacity=30); /* for IE */
+ /* opacity with small font can sometimes look too faded;
+ might want to set the 'color' property instead.
+ making day-numbers bold also fixes the problem */
+}
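+
+/*
+Illustrative sketch only (an assumption, not shipped styling): per the note above,
+an app stylesheet might prefer 'color' over opacity for other-month day numbers:
+
+.fc-day-number.fc-other-month {
+ opacity: 1;
+ color: #999;
+}
+*/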
+
+/* AgendaView all-day area
+--------------------------------------------------------------------------------------------------*/
+
+.fc-agenda-view .fc-day-grid {
+ position: relative;
+ z-index: 2; /* so the "more.." popover will be over the time grid */
+}
+
+.fc-agenda-view .fc-day-grid .fc-row {
+ min-height: 3em; /* all-day section will never get shorter than this */
+}
+
+.fc-agenda-view .fc-day-grid .fc-row .fc-content-skeleton {
+ padding-top: 1px; /* add a pixel to make sure there are 2px padding above events */
+ padding-bottom: 1em; /* give space underneath events for clicking/selecting days */
+}
+
+
+/* TimeGrid axis running down the side (for both the all-day area and the slot area)
+--------------------------------------------------------------------------------------------------*/
+
+.fc .fc-axis { /* .fc to overcome default cell styles */
+ vertical-align: middle;
+ padding: 0 4px;
+ white-space: nowrap;
+}
+
+.fc-ltr .fc-axis {
+ text-align: right;
+}
+
+.fc-rtl .fc-axis {
+ text-align: left;
+}
+
+.ui-widget td.fc-axis {
+ font-weight: normal; /* overcome jqui theme making it bold */
+}
+
+
+/* TimeGrid Structure
+--------------------------------------------------------------------------------------------------*/
+
+.fc-time-grid-container, /* so scroll container's z-index is below all-day */
+.fc-time-grid { /* so slats/bg/content/etc positions get scoped within here */
+ position: relative;
+ z-index: 1;
+}
+
+.fc-time-grid {
+ min-height: 100%; /* so if height setting is 'auto', .fc-bg stretches to fill height */
+}
+
+.fc-time-grid table { /* don't put outer borders on slats/bg/content/etc */
+ border: 0 hidden transparent;
+}
+
+.fc-time-grid > .fc-bg {
+ z-index: 1;
+}
+
+.fc-time-grid .fc-slats,
+.fc-time-grid > hr { /* the <hr> AgendaView injects when grid is shorter than scroller */
+ position: relative;
+ z-index: 2;
+}
+
+.fc-time-grid .fc-content-col {
+ position: relative; /* because now-indicator lives directly inside */
+}
+
+.fc-time-grid .fc-content-skeleton {
+ position: absolute;
+ z-index: 3;
+ top: 0;
+ left: 0;
+ right: 0;
+}
+
+/* divs within a cell within the fc-content-skeleton */
+
+.fc-time-grid .fc-business-container {
+ position: relative;
+ z-index: 1;
+}
+
+.fc-time-grid .fc-bgevent-container {
+ position: relative;
+ z-index: 2;
+}
+
+.fc-time-grid .fc-highlight-container {
+ position: relative;
+ z-index: 3;
+}
+
+.fc-time-grid .fc-event-container {
+ position: relative;
+ z-index: 4;
+}
+
+.fc-time-grid .fc-now-indicator-line {
+ z-index: 5;
+}
+
+.fc-time-grid .fc-helper-container { /* also is fc-event-container */
+ position: relative;
+ z-index: 6;
+}
+
+
+/* TimeGrid Slats (lines that run horizontally)
+--------------------------------------------------------------------------------------------------*/
+
+.fc-time-grid .fc-slats td {
+ height: 1.5em;
+ border-bottom: 0; /* each cell is responsible for its top border */
+}
+
+.fc-time-grid .fc-slats .fc-minor td {
+ border-top-style: dotted;
+}
+
+.fc-time-grid .fc-slats .ui-widget-content { /* for jqui theme */
+ background: none; /* see through to fc-bg */
+}
+
+
+/* TimeGrid Highlighting Slots
+--------------------------------------------------------------------------------------------------*/
+
+.fc-time-grid .fc-highlight-container { /* a div within a cell within the fc-highlight-skeleton */
+ position: relative; /* scopes the left/right of the fc-highlight to be in the column */
+}
+
+.fc-time-grid .fc-highlight {
+ position: absolute;
+ left: 0;
+ right: 0;
+ /* top and bottom will be set by JS */
+}
+
+
+/* TimeGrid Event Containment
+--------------------------------------------------------------------------------------------------*/
+
+.fc-ltr .fc-time-grid .fc-event-container { /* space on the sides of events for LTR (default) */
+ margin: 0 2.5% 0 2px;
+}
+
+.fc-rtl .fc-time-grid .fc-event-container { /* space on the sides of events for RTL */
+ margin: 0 2px 0 2.5%;
+}
+
+.fc-time-grid .fc-event,
+.fc-time-grid .fc-bgevent {
+ position: absolute;
+ z-index: 1; /* scope inner z-index's */
+}
+
+.fc-time-grid .fc-bgevent {
+ /* background events always span full width */
+ left: 0;
+ right: 0;
+}
+
+
+/* Generic Vertical Event
+--------------------------------------------------------------------------------------------------*/
+
+.fc-v-event.fc-not-start { /* events that are continuing from another day */
+ /* replace space made by the top border with padding */
+ border-top-width: 0;
+ padding-top: 1px;
+
+ /* remove top rounded corners */
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+}
+
+.fc-v-event.fc-not-end {
+ /* replace space made by the top border with padding */
+ border-bottom-width: 0;
+ padding-bottom: 1px;
+
+ /* remove bottom rounded corners */
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0;
+}
+
+
+/* TimeGrid Event Styling
+----------------------------------------------------------------------------------------------------
+We use the full "fc-time-grid-event" class instead of using descendants because the event won't
+be a descendant of the grid when it is being dragged.
+*/
+
+.fc-time-grid-event {
+ overflow: hidden; /* don't let the bg flow over rounded corners */
+}
+
+.fc-time-grid-event .fc-time,
+.fc-time-grid-event .fc-title {
+ padding: 0 1px;
+}
+
+.fc-time-grid-event .fc-time {
+ font-size: .85em;
+ white-space: nowrap;
+}
+
+/* short mode, where time and title are on the same line */
+
+.fc-time-grid-event.fc-short .fc-content {
+ /* don't wrap to second line (now that contents will be inline) */
+ white-space: nowrap;
+}
+
+.fc-time-grid-event.fc-short .fc-time,
+.fc-time-grid-event.fc-short .fc-title {
+ /* put the time and title on the same line */
+ display: inline-block;
+ vertical-align: top;
+}
+
+.fc-time-grid-event.fc-short .fc-time span {
+ display: none; /* don't display the full time text... */
+}
+
+.fc-time-grid-event.fc-short .fc-time:before {
+ content: attr(data-start); /* ...instead, display only the start time */
+}
+
+.fc-time-grid-event.fc-short .fc-time:after {
+ content: "\000A0-\000A0"; /* seperate with a dash, wrapped in nbsp's */
+}
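+
+/*
+For clarity, the markup assumed by the two rules above is roughly (a sketch, not a
+guaranteed structure):
+
+ <span class="fc-time" data-start="7:00"><span>7:00 - 8:30</span></span>
+
+"short" mode hides the inner span and rebuilds "7:00 -" from the data-start
+attribute via attr().
+*/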
+
+.fc-time-grid-event.fc-short .fc-title {
+ font-size: .85em; /* make the title text the same size as the time */
+ padding: 0; /* undo padding from above */
+}
+
+/* resizer */
+
+.fc-time-grid-event .fc-resizer {
+ left: 0;
+ right: 0;
+ bottom: 0;
+ height: 8px;
+ overflow: hidden;
+ line-height: 8px;
+ font-size: 11px;
+ font-family: monospace;
+ text-align: center;
+ cursor: s-resize;
+}
+
+.fc-time-grid-event .fc-resizer:after {
+ content: "=";
+}
+
+
+/* Now Indicator
+--------------------------------------------------------------------------------------------------*/
+
+.fc-time-grid .fc-now-indicator-line {
+ border-top-width: 1px;
+ left: 0;
+ right: 0;
+}
+
+/* arrow on axis */
+
+.fc-time-grid .fc-now-indicator-arrow {
+ margin-top: -5px; /* vertically center on top coordinate */
+}
+
+.fc-ltr .fc-time-grid .fc-now-indicator-arrow {
+ left: 0;
+ /* triangle pointing right... */
+ border-width: 5px 0 5px 6px;
+ border-top-color: transparent;
+ border-bottom-color: transparent;
+}
+
+.fc-rtl .fc-time-grid .fc-now-indicator-arrow {
+ right: 0;
+ /* triangle pointing left... */
+ border-width: 5px 6px 5px 0;
+ border-top-color: transparent;
+ border-bottom-color: transparent;
+}
diff --git a/asset/static/css/calendar/fullcalendar.min.css b/asset/static/css/calendar/fullcalendar.min.css
new file mode 100755
index 0000000..2dc7e9f
--- /dev/null
+++ b/asset/static/css/calendar/fullcalendar.min.css
@@ -0,0 +1,5 @@
+/*!
+ * FullCalendar v2.6.1 Stylesheet
+ * Docs & License: http://fullcalendar.io/
+ * (c) 2015 Adam Shaw
+ */.fc{direction:ltr;text-align:left}.fc-rtl{text-align:right}body .fc{font-size:1em}.fc-unthemed .fc-divider,.fc-unthemed .fc-popover,.fc-unthemed .fc-row,.fc-unthemed tbody,.fc-unthemed td,.fc-unthemed th,.fc-unthemed thead{border-color:#ddd}.fc-unthemed .fc-popover{background-color:#fff}.fc-unthemed .fc-divider,.fc-unthemed .fc-popover .fc-header{background:#eee}.fc-unthemed .fc-popover .fc-header .fc-close{color:#666}.fc-unthemed .fc-today{background:#fcf8e3}.fc-highlight{background:#bce8f1;opacity:.3;filter:alpha(opacity=30)}.fc-bgevent{background:#8fdf82;opacity:.3;filter:alpha(opacity=30)}.fc-nonbusiness{background:#d7d7d7}.fc-icon{display:inline-block;width:1em;height:1em;line-height:1em;font-size:1em;text-align:center;overflow:hidden;font-family:"Courier New",Courier,monospace;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.fc-icon:after{position:relative;margin:0 -1em}.fc-icon-left-single-arrow:after{content:"\02039";font-weight:700;font-size:200%;top:-7%;left:3%}.fc-icon-right-single-arrow:after{content:"\0203A";font-weight:700;font-size:200%;top:-7%;left:-3%}.fc-icon-left-double-arrow:after{content:"\000AB";font-size:160%;top:-7%}.fc-icon-right-double-arrow:after{content:"\000BB";font-size:160%;top:-7%}.fc-icon-left-triangle:after{content:"\25C4";font-size:125%;top:3%;left:-2%}.fc-icon-right-triangle:after{content:"\25BA";font-size:125%;top:3%;left:2%}.fc-icon-down-triangle:after{content:"\25BC";font-size:125%;top:2%}.fc-icon-x:after{content:"\000D7";font-size:200%;top:6%}.fc button{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box;margin:0;height:2.1em;padding:0 .6em;font-size:1em;white-space:nowrap;cursor:pointer}.fc button::-moz-focus-inner{margin:0;padding:0}.fc-state-default{border:1px solid}.fc-state-default.fc-corner-left{border-top-left-radius:4px;border-bottom-left-radius:4px}.fc-state-default.fc-corner-right{border-top-right-radius:4px;border-bottom-right-radius:4px}.fc button .fc-icon{position:relative;top:-.05em;margin:0 .2em;vertical-align:middle}.fc-state-default{background-color:#f5f5f5;background-image:-moz-linear-gradient(top,#fff,#e6e6e6);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,#e6e6e6);background-image:-o-linear-gradient(top,#fff,#e6e6e6);background-image:linear-gradient(to bottom,#fff,#e6e6e6);background-repeat:repeat-x;border-color:#e6e6e6 #e6e6e6 #bfbfbf;border-color:rgba(0,0,0,.1) rgba(0,0,0,.1) rgba(0,0,0,.25);color:#333;text-shadow:0 1px 1px rgba(255,255,255,.75);box-shadow:inset 0 1px 0 rgba(255,255,255,.2),0 1px 2px rgba(0,0,0,.05)}.fc-state-active,.fc-state-disabled,.fc-state-down,.fc-state-hover{color:#333;background-color:#e6e6e6}.fc-state-hover{color:#333;text-decoration:none;background-position:0 -15px;-webkit-transition:background-position .1s linear;-moz-transition:background-position .1s linear;-o-transition:background-position .1s linear;transition:background-position .1s linear}.fc-state-active,.fc-state-down{background-color:#ccc;background-image:none;box-shadow:inset 0 2px 4px rgba(0,0,0,.15),0 1px 2px rgba(0,0,0,.05)}.fc-state-disabled{cursor:default;background-image:none;opacity:.65;filter:alpha(opacity=65);box-shadow:none}.fc-button-group{display:inline-block}.fc .fc-button-group>*{float:left;margin:0 0 0 -1px}.fc .fc-button-group>:first-child{margin-left:0}.fc-popover{position:absolute;box-shadow:0 2px 6px rgba(0,0,0,.15)}.fc-popover 
.fc-header{padding:2px 4px}.fc-popover .fc-header .fc-title{margin:0 2px}.fc-popover .fc-header .fc-close{cursor:pointer}.fc-ltr .fc-popover .fc-header .fc-title,.fc-rtl .fc-popover .fc-header .fc-close{float:left}.fc-ltr .fc-popover .fc-header .fc-close,.fc-rtl .fc-popover .fc-header .fc-title{float:right}.fc-unthemed .fc-popover{border-width:1px;border-style:solid}.fc-unthemed .fc-popover .fc-header .fc-close{font-size:.9em;margin-top:2px}.fc-popover>.ui-widget-header+.ui-widget-content{border-top:0}.fc-divider{border-style:solid;border-width:1px}hr.fc-divider{height:0;margin:0;padding:0 0 2px;border-width:1px 0}.fc-clear{clear:both}.fc-bg,.fc-bgevent-skeleton,.fc-helper-skeleton,.fc-highlight-skeleton{position:absolute;top:0;left:0;right:0}.fc-bg{bottom:0}.fc-bg table{height:100%}.fc table{width:100%;table-layout:fixed;border-collapse:collapse;border-spacing:0;font-size:1em}.fc th{text-align:center}.fc td,.fc th{border-style:solid;border-width:1px;padding:0;vertical-align:top}.fc td.fc-today{border-style:double}.fc .fc-row{border-style:solid;border-width:0}.fc-row table{border-left:0 hidden transparent;border-right:0 hidden transparent;border-bottom:0 hidden transparent}.fc-row:first-child table{border-top:0 hidden transparent}.fc-row{position:relative}.fc-row .fc-bg{z-index:1}.fc-row .fc-bgevent-skeleton,.fc-row .fc-highlight-skeleton{bottom:0}.fc-row .fc-bgevent-skeleton table,.fc-row .fc-highlight-skeleton table{height:100%}.fc-row .fc-bgevent-skeleton td,.fc-row .fc-highlight-skeleton td{border-color:transparent}.fc-row .fc-bgevent-skeleton{z-index:2}.fc-row .fc-highlight-skeleton{z-index:3}.fc-row .fc-content-skeleton{position:relative;z-index:4;padding-bottom:2px}.fc-row .fc-helper-skeleton{z-index:5}.fc-row .fc-content-skeleton td,.fc-row .fc-helper-skeleton td{background:0 0;border-color:transparent;border-bottom:0}.fc-row .fc-content-skeleton tbody td,.fc-row .fc-helper-skeleton tbody td{border-top:0}.fc-scroller{overflow-y:scroll;overflow-x:hidden}.fc-scroller>*{position:relative;width:100%;overflow:hidden}.fc-event{position:relative;display:block;font-size:.85em;line-height:1.3;border-radius:3px;border:1px solid #3a87ad;background-color:#3a87ad;font-weight:400}.fc-event,.fc-event:hover,.ui-widget .fc-event{color:#fff;text-decoration:none}.fc-event.fc-draggable,.fc-event[href]{cursor:pointer}.fc-not-allowed,.fc-not-allowed .fc-event{cursor:not-allowed}.fc-event .fc-bg{z-index:1;background:#fff;opacity:.25;filter:alpha(opacity=25)}.fc-event .fc-content{position:relative;z-index:2}.fc-event .fc-resizer{position:absolute;z-index:3}.fc-ltr .fc-h-event.fc-not-start,.fc-rtl .fc-h-event.fc-not-end{margin-left:0;border-left-width:0;padding-left:1px;border-top-left-radius:0;border-bottom-left-radius:0}.fc-ltr .fc-h-event.fc-not-end,.fc-rtl .fc-h-event.fc-not-start{margin-right:0;border-right-width:0;padding-right:1px;border-top-right-radius:0;border-bottom-right-radius:0}.fc-h-event .fc-resizer{top:-1px;bottom:-1px;left:-1px;right:-1px;width:5px}.fc-ltr .fc-h-event .fc-start-resizer,.fc-ltr .fc-h-event .fc-start-resizer:after,.fc-ltr .fc-h-event .fc-start-resizer:before,.fc-rtl .fc-h-event .fc-end-resizer,.fc-rtl .fc-h-event .fc-end-resizer:after,.fc-rtl .fc-h-event .fc-end-resizer:before{right:auto;cursor:w-resize}.fc-ltr .fc-h-event .fc-end-resizer,.fc-ltr .fc-h-event .fc-end-resizer:after,.fc-ltr .fc-h-event .fc-end-resizer:before,.fc-rtl .fc-h-event .fc-start-resizer,.fc-rtl .fc-h-event .fc-start-resizer:after,.fc-rtl .fc-h-event 
.fc-start-resizer:before{left:auto;cursor:e-resize}.fc-day-grid-event{margin:1px 2px 0;padding:0 1px}.fc-day-grid-event .fc-content{white-space:nowrap;overflow:hidden}.fc-day-grid-event .fc-time{font-weight:700}.fc-day-grid-event .fc-resizer{left:-3px;right:-3px;width:7px}a.fc-more{margin:1px 3px;font-size:.85em;cursor:pointer;text-decoration:none}a.fc-more:hover{text-decoration:underline}.fc-limited{display:none}.fc-day-grid .fc-row{z-index:1}.fc-more-popover{z-index:2;width:220px}.fc-more-popover .fc-event-container{padding:10px}.fc-now-indicator{position:absolute;border:0 solid red}.fc-toolbar{text-align:center;margin-bottom:1em}.fc-toolbar .fc-left{float:left}.fc-toolbar .fc-right{float:right}.fc-toolbar .fc-center{display:inline-block}.fc .fc-toolbar>*>*{float:left;margin-left:.75em}.fc .fc-toolbar>*>:first-child{margin-left:0}.fc-toolbar h2{margin:0}.fc-toolbar button{position:relative}.fc-toolbar .fc-state-hover,.fc-toolbar .ui-state-hover{z-index:2}.fc-toolbar .fc-state-down{z-index:3}.fc-toolbar .fc-state-active,.fc-toolbar .ui-state-active{z-index:4}.fc-toolbar button:focus{z-index:5}.fc-view-container *,.fc-view-container :after,.fc-view-container :before{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}.fc-view,.fc-view>table{position:relative;z-index:1}.fc-basicDay-view .fc-content-skeleton,.fc-basicWeek-view .fc-content-skeleton{padding-top:1px;padding-bottom:1em}.fc-basic-view .fc-body .fc-row{min-height:4em}.fc-row.fc-rigid{overflow:hidden}.fc-row.fc-rigid .fc-content-skeleton{position:absolute;top:0;left:0;right:0}.fc-basic-view .fc-day-number,.fc-basic-view .fc-week-number{padding:0 2px}.fc-basic-view td.fc-day-number,.fc-basic-view td.fc-week-number span{padding-top:2px;padding-bottom:2px}.fc-basic-view .fc-week-number{text-align:center}.fc-basic-view .fc-week-number span{display:inline-block;min-width:1.25em}.fc-ltr .fc-basic-view .fc-day-number{text-align:right}.fc-rtl .fc-basic-view .fc-day-number{text-align:left}.fc-day-number.fc-other-month{opacity:.3;filter:alpha(opacity=30)}.fc-agenda-view .fc-day-grid{position:relative;z-index:2}.fc-agenda-view .fc-day-grid .fc-row{min-height:3em}.fc-agenda-view .fc-day-grid .fc-row .fc-content-skeleton{padding-top:1px;padding-bottom:1em}.fc .fc-axis{vertical-align:middle;padding:0 4px;white-space:nowrap}.fc-ltr .fc-axis{text-align:right}.fc-rtl .fc-axis{text-align:left}.ui-widget td.fc-axis{font-weight:400}.fc-time-grid,.fc-time-grid-container{position:relative;z-index:1}.fc-time-grid{min-height:100%}.fc-time-grid table{border:0 hidden transparent}.fc-time-grid>.fc-bg{z-index:1}.fc-time-grid .fc-slats,.fc-time-grid>hr{position:relative;z-index:2}.fc-time-grid .fc-content-col{position:relative}.fc-time-grid .fc-content-skeleton{position:absolute;z-index:3;top:0;left:0;right:0}.fc-time-grid .fc-business-container{position:relative;z-index:1}.fc-time-grid .fc-bgevent-container{position:relative;z-index:2}.fc-time-grid .fc-highlight-container{z-index:3}.fc-time-grid .fc-event-container{position:relative;z-index:4}.fc-time-grid .fc-now-indicator-line{z-index:5}.fc-time-grid .fc-helper-container{position:relative;z-index:6}.fc-time-grid .fc-slats td{height:1.5em;border-bottom:0}.fc-time-grid .fc-slats .fc-minor td{border-top-style:dotted}.fc-time-grid .fc-slats .ui-widget-content{background:0 0}.fc-time-grid .fc-highlight-container{position:relative}.fc-time-grid .fc-highlight{position:absolute;left:0;right:0}.fc-ltr .fc-time-grid .fc-event-container{margin:0 2.5% 0 2px}.fc-rtl .fc-time-grid 
.fc-event-container{margin:0 2px 0 2.5%}.fc-time-grid .fc-bgevent,.fc-time-grid .fc-event{position:absolute;z-index:1}.fc-time-grid .fc-bgevent{left:0;right:0}.fc-v-event.fc-not-start{border-top-width:0;padding-top:1px;border-top-left-radius:0;border-top-right-radius:0}.fc-v-event.fc-not-end{border-bottom-width:0;padding-bottom:1px;border-bottom-left-radius:0;border-bottom-right-radius:0}.fc-time-grid-event{overflow:hidden}.fc-time-grid-event .fc-time,.fc-time-grid-event .fc-title{padding:0 1px}.fc-time-grid-event .fc-time{font-size:.85em;white-space:nowrap}.fc-time-grid-event.fc-short .fc-content{white-space:nowrap}.fc-time-grid-event.fc-short .fc-time,.fc-time-grid-event.fc-short .fc-title{display:inline-block;vertical-align:top}.fc-time-grid-event.fc-short .fc-time span{display:none}.fc-time-grid-event.fc-short .fc-time:before{content:attr(data-start)}.fc-time-grid-event.fc-short .fc-time:after{content:"\000A0-\000A0"}.fc-time-grid-event.fc-short .fc-title{font-size:.85em;padding:0}.fc-time-grid-event .fc-resizer{left:0;right:0;bottom:0;height:8px;overflow:hidden;line-height:8px;font-size:11px;font-family:monospace;text-align:center;cursor:s-resize}.fc-time-grid-event .fc-resizer:after{content:"="}.fc-time-grid .fc-now-indicator-line{border-top-width:1px;left:0;right:0}.fc-time-grid .fc-now-indicator-arrow{margin-top:-5px}.fc-ltr .fc-time-grid .fc-now-indicator-arrow{left:0;border-width:5px 0 5px 6px;border-top-color:transparent;border-bottom-color:transparent}.fc-rtl .fc-time-grid .fc-now-indicator-arrow{right:0;border-width:5px 6px 5px 0;border-top-color:transparent;border-bottom-color:transparent}
\ No newline at end of file
diff --git a/asset/static/css/calendar/fullcalendar.print.css b/asset/static/css/calendar/fullcalendar.print.css
new file mode 100755
index 0000000..e399f61
--- /dev/null
+++ b/asset/static/css/calendar/fullcalendar.print.css
@@ -0,0 +1,208 @@
+/*!
+ * FullCalendar v2.6.1 Print Stylesheet
+ * Docs & License: http://fullcalendar.io/
+ * (c) 2015 Adam Shaw
+ */
+
+/*
+ * Include this stylesheet on your page to get a more printer-friendly calendar.
+ * When including this stylesheet, use the media='print' attribute of the <link> tag.
+ * Make sure to include this stylesheet IN ADDITION to the regular fullcalendar.css.
+ */
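+
+/*
+ * For example, a page could pull in both stylesheets like so (illustrative
+ * markup; the hrefs are assumptions, so point them at wherever the files
+ * under asset/static/css/calendar/ are actually served from):
+ *
+ *   <link rel='stylesheet' href='css/calendar/fullcalendar.css' />
+ *   <link rel='stylesheet' href='css/calendar/fullcalendar.print.css' media='print' />
+ */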
+
+.fc {
+ max-width: 100% !important;
+}
+
+
+/* Global Event Restyling
+--------------------------------------------------------------------------------------------------*/
+
+.fc-event {
+ background: #fff !important;
+ color: #000 !important;
+ page-break-inside: avoid;
+}
+
+.fc-event .fc-resizer {
+ display: none;
+}
+
+
+/* Table & Day-Row Restyling
+--------------------------------------------------------------------------------------------------*/
+
+th,
+td,
+hr,
+thead,
+tbody,
+.fc-row {
+ border-color: #ccc !important;
+ background: #fff !important;
+}
+
+/* kill the overlaid, absolutely-positioned components */
+/* common... */
+.fc-bg,
+.fc-bgevent-skeleton,
+.fc-highlight-skeleton,
+.fc-helper-skeleton,
+/* for timegrid. within cells within table skeletons... */
+.fc-bgevent-container,
+.fc-business-container,
+.fc-highlight-container,
+.fc-helper-container {
+ display: none;
+}
+
+/* don't force a min-height on rows (for DayGrid) */
+.fc tbody .fc-row {
+ height: auto !important; /* undo height that JS set in distributeHeight */
+ min-height: 0 !important; /* undo the min-height from each view's specific stylesheet */
+}
+
+.fc tbody .fc-row .fc-content-skeleton {
+ position: static; /* undo .fc-rigid */
+ padding-bottom: 0 !important; /* use a more border-friendly method for this... */
+}
+
+.fc tbody .fc-row .fc-content-skeleton tbody tr:last-child td { /* only works in newer browsers */
+ padding-bottom: 1em; /* ...gives space within the skeleton. also ensures min height in a way */
+}
+
+.fc tbody .fc-row .fc-content-skeleton table {
+ /* provides a min-height for the row, but only effective for IE, which exaggerates this value,
+ making it look more like 3em. For other browsers, it will already be this tall */
+ height: 1em;
+}
+
+
+/* Undo month-view event limiting. Display all events and hide the "more" links
+--------------------------------------------------------------------------------------------------*/
+
+.fc-more-cell,
+.fc-more {
+ display: none !important;
+}
+
+.fc tr.fc-limited {
+ display: table-row !important;
+}
+
+.fc td.fc-limited {
+ display: table-cell !important;
+}
+
+.fc-popover {
+ display: none; /* never display the "more.." popover in print mode */
+}
+
+
+/* TimeGrid Restyling
+--------------------------------------------------------------------------------------------------*/
+
+/* undo the min-height 100% trick used to fill the container's height */
+.fc-time-grid {
+ min-height: 0 !important;
+}
+
+/* don't display the side axis at all ("all-day" and time cells) */
+.fc-agenda-view .fc-axis {
+ display: none;
+}
+
+/* don't display the horizontal lines */
+.fc-slats,
+.fc-time-grid hr { /* this hr is used when height is underused and needs to be filled */
+ display: none !important; /* important overrides inline declaration */
+}
+
+/* let the container that holds the events be naturally positioned and create real height */
+.fc-time-grid .fc-content-skeleton {
+ position: static;
+}
+
+/* in case there are no events, we still want some height */
+.fc-time-grid .fc-content-skeleton table {
+ height: 4em;
+}
+
+/* kill the horizontal spacing made by the event container. event margins will be done below */
+.fc-time-grid .fc-event-container {
+ margin: 0 !important;
+}
+
+
+/* TimeGrid *Event* Restyling
+--------------------------------------------------------------------------------------------------*/
+
+/* naturally position events, vertically stacking them */
+.fc-time-grid .fc-event {
+ position: static !important;
+ margin: 3px 2px !important;
+}
+
+/* for events that continue to a future day, give the bottom border back */
+.fc-time-grid .fc-event.fc-not-end {
+ border-bottom-width: 1px !important;
+}
+
+/* indicate the event continues via "..." text */
+.fc-time-grid .fc-event.fc-not-end:after {
+ content: "...";
+}
+
+/* for events that are continuations from previous days, give the top border back */
+.fc-time-grid .fc-event.fc-not-start {
+ border-top-width: 1px !important;
+}
+
+/* indicate the event is a continuation via "..." text */
+.fc-time-grid .fc-event.fc-not-start:before {
+ content: "...";
+}
+
+/* time */
+
+/* undo a previous declaration and let the time text span to a second line */
+.fc-time-grid .fc-event .fc-time {
+ white-space: normal !important;
+}
+
+/* hide the time that is normally displayed... */
+.fc-time-grid .fc-event .fc-time span {
+ display: none;
+}
+
+/* ...replace it with a more verbose version (includes AM/PM) stored in an html attribute */
+.fc-time-grid .fc-event .fc-time:after {
+ content: attr(data-full);
+}
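+
+/*
+ * For reference, FullCalendar renders each timed event's time element with
+ * both a compact and a verbose form, roughly like this (illustrative markup;
+ * the actual strings depend on the event and locale settings):
+ *
+ *   <div class="fc-time" data-start="6:30" data-full="6:30 AM - 7:30 AM">
+ *     <span>6:30 - 7:30</span>
+ *   </div>
+ *
+ * Hiding the inner <span> above and printing attr(data-full) here swaps the
+ * compact text for the verbose version.
+ */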
+
+
+/* Vertical Scroller & Containers
+--------------------------------------------------------------------------------------------------*/
+
+/* kill the scrollbars and allow natural height */
+.fc-scroller,
+.fc-day-grid-container, /* these divs might be assigned height, which we need to clear */
+.fc-time-grid-container {
+ overflow: visible !important;
+ height: auto !important;
+}
+
+/* kill the horizontal border/padding used to compensate for scrollbars */
+.fc-row {
+ border: 0 !important;
+ margin: 0 !important;
+}
+
+
+/* Button Controls
+--------------------------------------------------------------------------------------------------*/
+
+.fc-button-group,
+.fc button {
+ display: none; /* don't display any button-related controls */
+}
diff --git a/asset/static/css/colorpicker/bootstrap-colorpicker.min.css b/asset/static/css/colorpicker/bootstrap-colorpicker.min.css
new file mode 100755
index 0000000..fc7b1d1
--- /dev/null
+++ b/asset/static/css/colorpicker/bootstrap-colorpicker.min.css
@@ -0,0 +1,9 @@
+/*!
+ * Bootstrap Colorpicker
+ * http://mjolnic.github.io/bootstrap-colorpicker/
+ *
+ * Originally written by (c) 2012 Stefan Petre
+ * Licensed under the Apache License v2.0
+ * http://www.apache.org/licenses/LICENSE-2.0.txt
+ *
+ */.colorpicker-saturation{float:left;width:100px;height:100px;cursor:crosshair;background-image:url("../colorpicker/img/bootstrap-colorpicker/saturation.png")}.colorpicker-saturation i{position:absolute;top:0;left:0;display:block;width:5px;height:5px;margin:-4px 0 0 -4px;border:1px solid #000;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.colorpicker-saturation i b{display:block;width:5px;height:5px;border:1px solid #fff;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.colorpicker-hue,.colorpicker-alpha{float:left;width:15px;height:100px;margin-bottom:4px;margin-left:4px;cursor:row-resize}.colorpicker-hue i,.colorpicker-alpha i{position:absolute;top:0;left:0;display:block;width:100%;height:1px;margin-top:-1px;background:#000;border-top:1px solid #fff}.colorpicker-hue{background-image:url("../colorpicker/img/bootstrap-colorpicker/hue.png")}.colorpicker-alpha{display:none;background-image:url("../colorpicker/img/bootstrap-colorpicker/alpha.png")}.colorpicker-saturation,.colorpicker-hue,.colorpicker-alpha{background-size:contain}.colorpicker{top:0;left:0;z-index:2500;min-width:130px;padding:4px;margin-top:1px;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;*zoom:1}.colorpicker:before,.colorpicker:after{display:table;line-height:0;content:""}.colorpicker:after{clear:both}.colorpicker:before{position:absolute;top:-7px;left:6px;display:inline-block;border-right:7px solid transparent;border-bottom:7px solid #ccc;border-left:7px solid transparent;border-bottom-color:rgba(0,0,0,0.2);content:''}.colorpicker:after{position:absolute;top:-6px;left:7px;display:inline-block;border-right:6px solid transparent;border-bottom:6px solid #fff;border-left:6px solid transparent;content:''}.colorpicker div{position:relative}.colorpicker.colorpicker-with-alpha{min-width:140px}.colorpicker.colorpicker-with-alpha .colorpicker-alpha{display:block}.colorpicker-color{height:10px;margin-top:5px;clear:both;background-image:url("../colorpicker/img/bootstrap-colorpicker/alpha.png");background-position:0 100%}.colorpicker-color div{height:10px}.colorpicker-selectors{display:none;height:10px;margin-top:5px;clear:both}.colorpicker-selectors i{float:left;width:10px;height:10px;cursor:pointer}.colorpicker-selectors i+i{margin-left:3px}.colorpicker-element .input-group-addon i,.colorpicker-element .add-on i{display:inline-block;width:16px;height:16px;vertical-align:text-top;cursor:pointer}.colorpicker.colorpicker-inline{position:relative;z-index:auto;display:inline-block;float:none}.colorpicker.colorpicker-horizontal{width:110px;height:auto;min-width:110px}.colorpicker.colorpicker-horizontal .colorpicker-saturation{margin-bottom:4px}.colorpicker.colorpicker-horizontal .colorpicker-color{width:100px}.colorpicker.colorpicker-horizontal .colorpicker-hue,.colorpicker.colorpicker-horizontal .colorpicker-alpha{float:left;width:100px;height:15px;margin-bottom:4px;margin-left:0;cursor:col-resize}.colorpicker.colorpicker-horizontal .colorpicker-hue i,.colorpicker.colorpicker-horizontal .colorpicker-alpha i{position:absolute;top:0;left:0;display:block;width:1px;height:15px;margin-top:0;background:#fff;border:0}.colorpicker.colorpicker-horizontal .colorpicker-hue{background-image:url("../colorpicker/img/bootstrap-colorpicker/hue-horizontal.png")}.colorpicker.colorpicker-horizontal 
.colorpicker-alpha{background-image:url("../colorpicker/img/bootstrap-colorpicker/alpha-horizontal.png")}.colorpicker.colorpicker-hidden{display:none}.colorpicker.colorpicker-visible{display:block}.colorpicker-inline.colorpicker-visible{display:inline-block}.colorpicker-right:before{right:6px;left:auto}.colorpicker-right:after{right:7px;left:auto}.colorpicker-no-arrow:before{border-right:0;border-left:0}.colorpicker-no-arrow:after{border-right:0;border-left:0}
\ No newline at end of file
diff --git a/asset/static/css/colorpicker/img/bootstrap-colorpicker/alpha-horizontal.png b/asset/static/css/colorpicker/img/bootstrap-colorpicker/alpha-horizontal.png
new file mode 100755
index 0000000..d0a65c0
Binary files /dev/null and b/asset/static/css/colorpicker/img/bootstrap-colorpicker/alpha-horizontal.png differ
diff --git a/asset/static/css/colorpicker/img/bootstrap-colorpicker/alpha.png b/asset/static/css/colorpicker/img/bootstrap-colorpicker/alpha.png
new file mode 100755
index 0000000..38043f1
Binary files /dev/null and b/asset/static/css/colorpicker/img/bootstrap-colorpicker/alpha.png differ
diff --git a/asset/static/css/colorpicker/img/bootstrap-colorpicker/hue-horizontal.png b/asset/static/css/colorpicker/img/bootstrap-colorpicker/hue-horizontal.png
new file mode 100755
index 0000000..a0d9add
Binary files /dev/null and b/asset/static/css/colorpicker/img/bootstrap-colorpicker/hue-horizontal.png differ
diff --git a/asset/static/css/colorpicker/img/bootstrap-colorpicker/hue.png b/asset/static/css/colorpicker/img/bootstrap-colorpicker/hue.png
new file mode 100755
index 0000000..d89560e
Binary files /dev/null and b/asset/static/css/colorpicker/img/bootstrap-colorpicker/hue.png differ
diff --git a/asset/static/css/colorpicker/img/bootstrap-colorpicker/saturation.png b/asset/static/css/colorpicker/img/bootstrap-colorpicker/saturation.png
new file mode 100755
index 0000000..594ae50
Binary files /dev/null and b/asset/static/css/colorpicker/img/bootstrap-colorpicker/saturation.png differ
diff --git a/asset/static/css/custom.css b/asset/static/css/custom.css
new file mode 100755
index 0000000..9292238
--- /dev/null
+++ b/asset/static/css/custom.css
@@ -0,0 +1,6367 @@
+body.nav-sm .container.body .left_col {
+ width: 70px;
+ padding: 0;
+ position: fixed;
+ height: 100%;
+ z-index: 200;
+}
+
+body.nav-sm .container.body .right_col {
+ padding: 10px 20px;
+ margin-left: 70px;
+ z-index: 2;
+}
+body.nav-sm .navbar.nav_title {
+ width: 70px;
+}
+
+body.nav-sm .navbar.nav_title a span {
+ display: none;
+}
+
+body.nav-sm .navbar.nav_title a i {
+ font-size: 27px;
+ margin: 13px 0 0 3px;
+}
+
+.site_title i {
+ border: 1px solid #EAEAEA;
+ padding: 5px 6px;
+ border-radius: 50%;
+}
+
+body.nav-sm .main_container .top_nav {
+ padding: 0 !important;
+ display: block;
+ margin-left: 70px;
+ z-index: 2;
+}
+
+body.nav-sm .nav.side-menu li a {
+ text-align: center !important;
+ font-weight: 400;
+ font-size: 10px;
+ padding: 10px 5px;
+}
+
+body.nav-sm .nav.side-menu li.active-sm {
+ border-right: 5px solid #1ABB9C;
+}
+
+body.nav-sm > .nav.side-menu > li.active-sm > a {
+ color: #1ABB9C !important;
+}
+
+body.nav-sm .nav.side-menu li a i.toggle-up {
+ display: none !important;
+}
+
+body.nav-sm .nav.side-menu li a i {
+ font-size: 25px !important;
+ text-align: center;
+ width: 100% !important;
+ margin-bottom: 5px;
+}
+
+body.nav-sm ul.nav.child_menu {
+ left: 100%;
+ position: absolute;
+ top: 0;
+ padding: 10px;
+ width: 210px;
+ z-index: 4000;
+ background: #3E5367;
+ display: none;
+}
+
+body.nav-sm ul.nav.child_menu li {
+ padding-left: 0;
+}
+
+body.nav-sm ul.nav.child_menu li a {
+ text-align: left !important;
+}
+
+body.nav-sm .profile {
+ display: none;
+}
+
+.menu_section {
+ margin-bottom: 35px;
+}
+
+.menu_section h3 {
+ padding-left: 23px;
+ color: #fff;
+ text-transform: uppercase;
+ letter-spacing: .5px;
+ font-weight: bold;
+ font-size: 11px;
+ margin-bottom: 0;
+ margin-top: 0;
+ text-shadow: 1px 1px #000;
+}
+
+.menu_section >ul {
+ margin-top: 10px;
+}
+
+.profile_pic {
+ width: 35%;
+ float: left;
+}
+
+.img-circle.profile_img {
+ width: 70%;
+ background: #fff;
+ margin-left: 15%;
+ z-index: 1000;
+ position: inherit;
+ margin-top: 20px;
+ border: 1px solid rgba(52, 73, 94, 0.44);
+ padding: 4px;
+}
+
+.profile_info {
+ padding: 25px 10px 10px;
+ width: 65%;
+ float: left;
+}
+
+.profile_info span {
+ font-size: 13px;
+ line-height: 30px;
+ color: #BAB8B8;
+}
+
+.profile_info h2 {
+ font-size: 14px;
+ color: #ECF0F1;
+ margin: 0;
+ font-weight: 300;
+}
+
+.profile.img_2 {
+ text-align: center;
+}
+
+.profile.img_2 .profile_pic {
+ width: 100%;
+}
+
+.profile.img_2 .profile_pic .img-circle.profile_img {
+ width: 50%;
+ margin: 10px 0 0;
+}
+
+.profile.img_2 .profile_info {
+ padding: 15px 10px 0;
+ width: 100%;
+ margin-bottom: 10px;
+ float: left;
+}
+
+#sidebar-menu span.fa {
+ float: right;
+ text-align: center;
+ margin-top: 5px;
+ font-size: 10px !important;
+ min-width: inherit;
+ color: #C4CFDA;
+}
+
+.active a span.fa {
+ text-align: right !important;
+ margin-right: 4px;
+}
+
+body.nav-sm .menu_section {
+ margin: 0;
+}
+
+body.nav-sm span.fa,
+body.nav-sm .menu_section h3 {
+ display: none !important;
+}
+
+.nav_menu {
+ float: left;
+ /*background: #F4F6F9;
+ border-bottom: 1px solid #E6E9ED;*/
+
+ background: #EDEDED;
+ border-bottom: 1px solid #D9DEE4;
+ margin-bottom: 10px;
+ width: 100%;
+}
+
+body.nav-md .container.body .col-md-3.left_col {
+ width: 230px;
+ padding: 0;
+ position: absolute;
+ display: flex;
+}
+
+body .container.body .right_col {
+ background: #F7F7F7;
+}
+
+body.nav-md .container.body .right_col {
+ padding: 10px 20px 0;
+ margin-left: 230px;
+}
+
+.nav_title {
+ width: 230px;
+ float: left;
+ background: #2A3F54;
+ border-radius: 0;
+ height: 57px;
+}
+
+@media (max-width: 991px) {
+ body.nav-md .container.body .right_col,
+ body.nav-md .container.body .top_nav {
+ width: 100%;
+ margin: 0;
+ }
+ body.nav-md .container.body .col-md-3.left_col {
+ display: none;
+ }
+ body.nav-md .container.body .right_col {
+ width: 100%;
+ padding-right: 0
+ }
+ .right_col {
+ padding: 10px !important;
+ }
+}
+
+@media (max-width: 1200px) {
+ .x_title h2 {
+ width: 62%;
+ font-size: 17px;
+ }
+ .tile,
+ .graph {
+ zoom: 85%;
+ height: inherit;
+ }
+}
+
+@media (max-width: 1270px) and (min-width: 192px) {
+ .x_title h2 small {
+ display: none
+ }
+}
+/** ------------------------------------------ **/
+
+.blue {
+ color: #3498DB;
+}
+
+.purple {
+ color: #9B59B6;
+}
+
+.green {
+ color: #1ABB9C;
+}
+
+.aero {
+ color: #9CC2CB;
+}
+
+.red {
+ color: #E74C3C;
+}
+
+.dark {
+ color: #34495E;
+}
+
+.border-blue {
+ border-color: #3498DB !important;
+}
+
+.border-purple {
+ border-color: #9B59B6 !important;
+}
+
+.border-green {
+ border-color: #1ABB9C !important;
+}
+
+.border-aero {
+ border-color: #9CC2CB !important;
+}
+
+.border-red {
+ border-color: #E74C3C !important;
+}
+
+.border-dark {
+ border-color: #34495E !important;
+}
+
+.bg-white {
+ background: #fff !important;
+ border: 1px solid #fff !important;
+ color: #73879C;
+}
+
+.bg-green {
+ background: #1ABB9C !important;
+ border: 1px solid #1ABB9C !important;
+ color: #fff;
+}
+
+.bg-red {
+ background: #E74C3C !important;
+ border: 1px solid #E74C3C !important;
+ color: #fff;
+}
+
+.bg-blue {
+ background: #3498DB !important;
+ border: 1px solid #3498DB !important;
+ color: #fff;
+}
+
+.bg-orange {
+ background: #F39C12 !important;
+ border: 1px solid #F39C12 !important;
+ color: #fff;
+}
+
+.bg-purple {
+ background: #9B59B6 !important;
+ border: 1px solid #9B59B6 !important;
+ color: #fff;
+}
+
+.bg-blue-sky {
+ background: #50C1CF !important;
+ border: 1px solid #50C1CF !important;
+ color: #fff;
+}
+
+.container {
+ width: 100%;
+ padding: 0
+}
+
+.navbar-brand,
+.navbar-nav>li>a {
+ color: #fff !important;
+}
+
+.navbar-brand,
+.navbar-nav>li>a {
+ margin-bottom: 5px
+}
+
+body {
+ color: #73879C;
+ background: #2A3F54;
+ /*#ECF0F1; #FCFCFC*/
+
+ font-family: "Helvetica Neue", Roboto, Arial, "Droid Sans", sans-serif;
+ font-size: 13px;
+ font-weight: 400;
+ line-height: 1.471;
+}
+
+.main_container .top_nav {
+ padding: 0 !important;
+ display: block;
+ padding: 10px 20px 0;
+ margin-left: 230px;
+}
+
+.main_container {}
+
+.no-padding {
+ padding: 0 !important;
+}
+
+.page-title {
+ width: 100%;
+ height: 65px;
+ padding: 10px 0;
+}
+
+.page-title .title_left {
+ width: 45%;
+ float: left;
+ display: block;
+}
+
+.page-title .title_left h3 {
+ margin: 9px 0;
+}
+
+.page-title .title_right {
+ width: 55%;
+ float: left;
+ display: block;
+}
+
+.page-title .title_right .pull-right {
+ margin: 10px 0;
+}
+
+.fixed_height_320 {
+ height: 320px;
+}
+
+.fixed_height_390 {
+ height: 390px;
+}
+
+.fixed_height_200 {
+ height: 200px;
+}
+
+.overflow_hidden {
+ overflow: hidden
+}
+
+.progress-bar-dark {
+ background-color: #34495E !important;
+}
+
+.progress-bar-gray {
+ background-color: #BDC3C7 !important;
+}
+
+table.no-margin .progress {
+ margin-bottom: 0;
+}
+
+.main_content {
+ padding: 10px 20px;
+}
+
+.col-md-55 {
+ width: 50%;
+ margin-bottom: 10px;
+}
+
+@media (max-width: 992px) {
+ .tile_stats_count {
+ margin-bottom: 10px;
+ border-bottom: 1px solid #D9DEE4;
+ padding-bottom: 10px;
+ }
+}
+
+@media (min-width: 992px) and (max-width: 1100px) {
+ .tile_stats_count .count {
+ font-size: 35px !important;
+ }
+}
+
+@media (max-width: 768px) {
+ .tile_stats_count .count {
+ font-size: 30px !important;
+ }
+ .tile_stats_count .right span {
+ font-size: 12px;
+ }
+}
+
+@media (min-width: 768px) {
+ .col-md-55 {
+ width: 20%;
+ }
+}
+
+@media (min-width: 992px) {
+ .col-md-55 {
+ width: 20%;
+ }
+}
+
+@media (min-width: 1200px) {
+ .col-md-55 {
+ width: 20%;
+ }
+}
+
+@media (min-width: 192px) and (max-width: 1270px) {
+ .hidden-small {
+ display: none !important;
+ }
+ table.tile_info span.right {
+ margin-right: 7px;
+ float: left;
+ }
+}
+
+.center-margin {
+ margin: 0 auto;
+ float: none!important;
+}
+
+.col-md-55,
+.col-xs-1,
+.col-sm-1,
+.col-md-1,
+.col-lg-1,
+.col-xs-2,
+.col-sm-2,
+.col-md-2,
+.col-lg-2,
+.col-xs-3,
+.col-sm-3,
+.col-md-3,
+.col-lg-3,
+.col-xs-4,
+.col-sm-4,
+.col-md-4,
+.col-lg-4,
+.col-xs-5,
+.col-sm-5,
+.col-md-5,
+.col-lg-5,
+.col-xs-6,
+.col-sm-6,
+.col-md-6,
+.col-lg-6,
+.col-xs-7,
+.col-sm-7,
+.col-md-7,
+.col-lg-7,
+.col-xs-8,
+.col-sm-8,
+.col-md-8,
+.col-lg-8,
+.col-xs-9,
+.col-sm-9,
+.col-md-9,
+.col-lg-9,
+.col-xs-10,
+.col-sm-10,
+.col-md-10,
+.col-lg-10,
+.col-xs-11,
+.col-sm-11,
+.col-md-11,
+.col-lg-11,
+.col-xs-12,
+.col-sm-12,
+.col-md-12,
+.col-lg-12 {
+ position: relative;
+ min-height: 1px;
+ float: left;
+ padding-right: 10px;
+ padding-left: 10px;
+}
+
+.row {
+ margin-right: -10px;
+ margin-left: -10px;
+}
+
+.grid_slider .col-md-6 {
+ padding: 0 40px;
+}
+
+h1,
+.h1,
+h2,
+.h2,
+h3,
+.h3 {
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+
+a {
+ color: #5A738E;
+ text-decoration: none;
+}
+
+a,
+a:visited,
+a:focus,
+a:active,
+:visited,
+:focus,
+:active,
+.btn:focus,
+.btn:active:focus,
+.btn.active:focus,
+.btn.focus,
+.btn:active.focus,
+.btn.active.focus {
+ outline: 0;
+}
+
+a:hover,
+a:focus {
+ text-decoration: none
+}
+
+.navbar {
+ margin-bottom: 0
+}
+
+.navbar-header {
+ background: #34495E;
+}
+
+.navbar-right {
+ margin-right: 0;
+}
+
+.top_nav .navbar-right {
+ margin: 0;
+ width: 70%;
+ float: right;
+}
+
+.top_nav .navbar-right li {
+ display: inline-block;
+ float: right;
+}
+
+.top_nav .dropdown-menu {
+ min-width: 220px;
+}
+
+.top_nav .dropdown-menu li {
+ width: 100%;
+}
+
+.top_nav .dropdown-menu li a {
+ width: 100%;
+ padding: 12px 20px;
+}
+
+.top_nav li a i {
+ font-size: 15px
+}
+
+.navbar-static-top {
+ position: fixed;
+ top: 0;
+ width: 100%;
+}
+
+.sidebar-header {
+ border-bottom: 0;
+ margin-top: 46px;
+}
+
+.sidebar-header:first-of-type {
+ margin-top: 0
+}
+
+.nav.side-menu> li {
+ position: relative;
+ display: block;
+}
+
+.nav.side-menu> li > a {
+ margin-bottom: 6px;
+}
+
+.nav.side-menu> li > a:hover {
+ color: #F2F5F7 !important;
+}
+
+.nav.side-menu>li>a:hover,
+.nav>li>a:focus {
+ text-decoration: none;
+ background: transparent;
+}
+
+.nav.child_menu li:hover {
+ background-color: rgba(255, 255, 255, 0.06);
+}
+
+.nav.child_menu li {
+ padding-left: 36px
+}
+
+body.nav-md ul.nav.child_menu li:before {
+ background: #425668;
+ bottom: auto;
+ content: "";
+ height: 8px;
+ left: 23px;
+ margin-top: 15px;
+ position: absolute;
+ right: auto;
+ width: 8px;
+ z-index: 1;
+ -webkit-border-radius: 50%;
+ -moz-border-radius: 50%;
+ border-radius: 50%;
+}
+
+body.nav-md ul.nav.child_menu li:after {
+ border-left: 1px solid #425668;
+ bottom: 0;
+ content: "";
+ left: 27px;
+ position: absolute;
+ top: 0;
+}
+
+.nav.side-menu>li>a,
+.nav.child_menu>li>a {
+ color: #E7E7E7 !important;
+ font-weight: 500;
+}
+
+.nav>li>a {
+ position: relative;
+ display: block;
+ padding: 13px 15px 7px;
+}
+
+.nav.side-menu> li.current-page,
+.nav.side-menu> li.active {
+ border-right: 5px solid #1ABB9C;
+}
+
+li.current-page {
+ background: rgba(255, 255, 255, 0.05);
+}
+
+.nav.side-menu> li.active > a {
+ color: #ff0000;
+ text-shadow: rgba(0, 0, 0, 0.25) 0 -1px 0;
+ background: -webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, #5b6479), color-stop(100%, #4c5566)), #686e78;
+ background: -webkit-linear-gradient(#334556, #2C4257), #2A3F54;
+ background: -moz-linear-gradient(#334556, #2C4257), #2A3F54;
+ background: -o-linear-gradient(#334556, #2C4257), #2A3F54;
+ background: linear-gradient(#334556, #2C4257), #2A3F54;
+ -webkit-box-shadow: rgba(0, 0, 0, 0.25) 0 1px 0, inset rgba(255, 255, 255, 0.16) 0 1px 0;
+ -moz-box-shadow: rgba(0, 0, 0, 0.25) 0 1px 0, inset rgba(255, 255, 255, 0.16) 0 1px 0;
+ box-shadow: rgba(0, 0, 0, 0.25) 0 1px 0, inset rgba(255, 255, 255, 0.16) 0 1px 0;
+}
+
+li.current-page a {
+ color: #1ABB9C !important
+}
+
+.navbar-brand,
+.navbar-nav>li>a {
+ font-weight: 500;
+ color: #ECF0F1 !important;
+ margin-left: 0 !important;
+ line-height: 32px;
+}
+
+.site_title {
+ text-overflow: ellipsis;
+ overflow: hidden;
+ font-weight: 400;
+ font-size: 22px;
+ width: 100%;
+ color: #ECF0F1 !important;
+ margin-left: 0 !important;
+ line-height: 59px;
+ display: block;
+ height: 55px;
+ margin: 0;
+ padding-left: 10px;
+}
+
+.site_title:hover,
+.site_title:focus {
+ text-decoration: none
+}
+
+.nav.navbar-nav>li>a {
+ color: #515356 !important;
+}
+
+.nav.top_menu>li>a {
+ position: relative;
+ display: block;
+ padding: 10px 15px;
+ color: #34495E !important;
+}
+
+.nav>li>a:hover,
+.nav>li>a:focus {
+ background-color: transparent;
+}
+
+.top_search {
+ padding: 0;
+}
+
+.top_search .form-control {
+ border-right: 0;
+ box-shadow: inset 0 1px 0px rgba(0, 0, 0, 0.075);
+ border-radius: 25px 0px 0px 25px;
+ padding-left: 20px;
+ border: 1px solid rgba(221, 226, 232, 0.49);
+}
+
+.top_search .form-control:focus {
+ border: 1px solid rgba(221, 226, 232, 0.49);
+ border-right: 0;
+}
+
+.top_search .input-group-btn button {
+ border-radius: 0px 25px 25px 0px;
+ border: 1px solid rgba(221, 226, 232, 0.49);
+ border-left: 0;
+ box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
+ color: #93A2B2;
+ margin-bottom: 0 !important;
+}
+
+.toggle {
+ float: left;
+ margin: 0;
+ padding-top: 16px;
+ width: 70px;
+}
+
+.toggle a {
+ padding: 15px 15px 0;
+ margin: 0;
+}
+
+.toggle a i {
+ font-size: 26px;
+}
+
+.nav.child_menu > li > a {
+ color: rgba(255, 255, 255, 0.75) !important;
+ font-size: 12px;
+ padding: 9px;
+}
+
+.panel_toolbox {
+ float: right;
+ min-width: 70px;
+}
+
+.panel_toolbox>li {
+ float: left;
+}
+
+.panel_toolbox>li>a {
+ padding: 5px;
+ color: #C5C7CB;
+ font-size: 14px;
+}
+
+.panel_toolbox>li>a:hover {
+ background: #F5F7FA;
+}
+
+.line_30 {
+ line-height: 30px;
+}
+
+.main_menu_side {
+ padding: 0;
+}
+
+.bs-docs-sidebar .nav>li>a {
+ display: block;
+ padding: 4px 6px;
+}
+
+.x_content canvas {}
+
+footer {
+ background: #fff;
+ padding: 5px 20px 0;
+ height: 45px;
+ margin: 0 -17px;
+}
+
+.tile-stats.sparkline {
+ padding: 10px;
+ text-align: center;
+}
+
+.jqstooltip {
+ background: #34495E !important;
+ width: 30px !important;
+ height: 22px !important;
+ text-decoration: none;
+}
+
+a.btn-success,
+a.btn-primary,
+a.btn-warning,
+a.btn-danger {
+ color: #fff !important;
+}
+
+.btn {
+ border-radius: 3px;
+}
+
+.tooltip {
+ display: block !important;
+}
+
+.tiles {
+ border-top: 1px solid #ccc;
+ margin-top: 15px;
+ padding-top: 5px;
+ margin-bottom: 0;
+}
+
+.tile {
+ overflow: hidden;
+}
+
+.top_tiles {
+ margin-bottom: 0;
+}
+
+.top_tiles .tile span {}
+
+.top_tiles .tile h2 {
+ font-size: 30px;
+ line-height: 30px;
+ margin: 3px 0 7px;
+ font-weight: bold;
+}
+
+article.media {
+ width: 100%;
+}
+/* ********* custom accordion **************************** */
+
+*,
+*:before,
+*:after {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+
+#integration-list {
+ width: 100%;
+ margin: 0 auto;
+ display: table;
+}
+
+#integration-list ul {
+ padding: 0;
+ margin: 20px 0;
+ color: #555;
+}
+
+#integration-list ul > li {
+ list-style: none;
+ border-top: 1px solid #ddd;
+ display: block;
+ padding: 15px;
+ overflow: hidden;
+}
+
+#integration-list ul:last-child {
+ border-bottom: 1px solid #ddd;
+}
+
+#integration-list ul > li:hover {
+ background: #efefef;
+}
+
+.expand {
+ display: block;
+ text-decoration: none;
+ color: #555;
+ cursor: pointer;
+}
+
+.expand h2 {
+ width: 85%;
+ float: left;
+}
+
+h2 {
+ font-size: 18px;
+ font-weight: 400;
+}
+
+#left,
+#right {
+ display: table;
+}
+
+#sup {
+ display: table-cell;
+ vertical-align: middle;
+ width: 80%;
+}
+
+.detail a {
+ text-decoration: none;
+ color: #C0392B;
+ border: 1px solid #C0392B;
+ padding: 6px 10px 5px;
+ font-size: 13px;
+ margin-right: 7px;
+}
+
+.detail {
+ margin: 10px 0 10px 0px;
+ display: none;
+ line-height: 22px;
+ height: 150px;
+}
+
+.detail span {
+ margin: 0;
+}
+
+.right-arrow {
+ width: 10px;
+ float: right;
+ font-weight: bold;
+ font-size: 20px;
+}
+
+.accordion .panel {
+ margin-bottom: 5px;
+ border-radius: 0;
+ border-bottom: 1px solid #efefef;
+}
+
+.accordion .panel-heading {
+ background: #F2F5F7;
+ padding: 13px;
+ width: 100%;
+ display: block;
+}
+
+.accordion .panel:hover {
+ background: #F2F5F7;
+}
+
+.x_panel {
+ position: relative;
+ width: 100%;
+ margin-bottom: 10px;
+ padding: 10px 17px;
+ display: inline-block;
+ background: #fff;
+ border: 1px solid #E6E9ED;
+ -webkit-column-break-inside: avoid;
+ -moz-column-break-inside: avoid;
+ column-break-inside: avoid;
+ opacity: 1;
+ -moz-transition: all .2s ease;
+ -o-transition: all .2s ease;
+ -webkit-transition: all .2s ease;
+ -ms-transition: all .2s ease;
+ transition: all .2s ease;
+}
+
+.x_title {
+ border-bottom: 2px solid #E6E9ED;
+ padding: 1px 5px 6px;
+ margin-bottom: 10px;
+}
+
+.x_title .filter {
+ width: 40%;
+ float: right;
+}
+
+.x_title h2 {
+ margin: 5px 0 6px;
+ float: left;
+ display: block;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+}
+
+.x_title h2 small {
+ margin-left: 10px;
+}
+
+.x_title span {
+ color: #BDBDBD;
+}
+
+.x_content {
+ padding: 0 5px 6px;
+ position: relative;
+ width: 100%;
+ float: left;
+ clear: both;
+ margin-top: 5px;
+}
+
+.x_content h4 {
+ font-size: 16px;
+ font-weight: 500;
+}
+
+legend {
+ padding-bottom: 7px;
+}
+
+.modal-title {
+ margin: 0;
+ line-height: 1.42857143;
+}
+
+.demo-placeholder {
+ height: 280px;
+}
+/* ********* contacts ********************************** */
+.profile_view {
+ margin-bottom: 20px;
+ display: inline-block;
+ width: 100%;
+}
+.well.profile_view {
+ padding: 10px 0 0;
+}
+
+.well.profile_view .divider {
+ border-top: 1px solid #e5e5e5;
+ padding-top: 5px;
+ margin-top: 5px;
+}
+
+.well.profile_view .ratings {
+ margin-bottom: 0;
+}
+
+.pagination.pagination-split li {
+ display: inline-block;
+ margin-right: 3px;
+}
+
+.pagination.pagination-split li a {
+ border-radius: 4px;
+ color: #768399;
+ -moz-border-radius: 4px;
+ -webkit-border-radius: 4px;
+}
+
+.well.profile_view {
+ background: #fff;
+}
+
+.well.profile_view .bottom {
+ margin-top: -20px;
+ background: #F2F5F7;
+ padding: 9px 0;
+ border-top: 1px solid #E6E9ED;
+}
+
+.well.profile_view .left {
+ margin-top: 20px;
+}
+
+.well.profile_view .left p {
+ margin-bottom: 3px;
+}
+
+.well.profile_view .right {
+ margin-top: 0px;
+ padding: 10px;
+}
+
+.well.profile_view .img-circle {
+ border: 1px solid #E6E9ED;
+ padding: 2px;
+}
+
+.well.profile_view h2 {
+ margin: 5px 0;
+}
+
+.well.profile_view .ratings {
+ text-align: left;
+ font-size: 16px;
+}
+
+.well.profile_view .brief {
+ margin: 0;
+ font-weight: 300;
+}
+
+.profile_left {
+ background: white;
+}
+/* ********* /contacts ********************************** */
+/* ********* /custom accordion **************************** */
+/* ********* dashboard widget **************************** */
+
+table.tile h3,
+table.tile h4,
+table.tile span {
+ font-weight: bold;
+ vertical-align: middle !important;
+}
+
+table.tile th,
+table.tile td {
+ text-align: center;
+}
+
+table.tile th {
+ border-bottom: 1px solid #E6ECEE;
+}
+
+table.tile td {
+ padding: 5px 0;
+}
+
+table.tile td ul {
+ text-align: left;
+ padding-left: 0
+}
+
+table.tile td ul li {
+ list-style: none;
+ width: 100%;
+}
+
+table.tile td ul li a {
+ width: 100%
+}
+
+table.tile td ul li a big {
+ right: 0;
+ float: right;
+ margin-right: 13px;
+}
+
+table.tile_info {
+ width: 100%;
+}
+
+table.tile_info td {
+ text-align: left;
+ padding: 1px;
+ font-size: 15px
+}
+
+table.tile_info td p {
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ margin: 0;
+ line-height: 28px;
+}
+
+table.tile_info td i {
+ margin-right: 8px;
+ font-size: 17px;
+ float: left;
+ width: 18px;
+ line-height: 28px
+}
+
+table.tile_info td:first-child {
+ width: 83%;
+}
+
+td span {
+ line-height: 28px;
+}
+
+.sidebar-widget {
+ overflow: hidden;
+}
+
+.error-number {
+ font-size: 90px;
+ line-height: 90px;
+ margin: 20px 0;
+}
+
+.col-middle {
+ margin-top: 5%;
+}
+
+.mid_center {
+ width: 370px;
+ margin: 0 auto;
+ text-align: center;
+ padding: 10px 20px;
+}
+
+h3.degrees {
+ font-size: 22px;
+ font-weight: 400;
+ text-align: center;
+}
+
+.degrees:after {
+ content: "o";
+ position: relative;
+ top: -12px;
+ font-size: 13px;
+ font-weight: 300;
+}
+
+.daily-weather .day {
+ font-size: 14px;
+ border-top: 2px solid rgba(115, 135, 156, 0.36);
+ text-align: center;
+ border-bottom: 2px solid rgba(115, 135, 156, 0.36);
+ padding: 5px 0;
+}
+
+.weather-days .col-sm-2 {
+ overflow: hidden;
+ width: 16.66666667%;
+}
+
+.weather .row {
+ margin-bottom: 0
+}
+/* ********* tables styling ******************************* */
+
+.bulk-actions {
+ display: none;
+}
+
+table.countries_list {
+ width: 100%;
+}
+
+table.countries_list td {
+ padding: 0 10px;
+ line-height: 30px;
+ border-top: 1px solid #eeeeee;
+}
+
+.dataTables_paginate a {
+ padding: 6px 9px !important;
+ background: #ddd !important;
+ border-color: #ddd !important;
+}
+
+.paging_full_numbers a.paginate_active {
+ background-color: rgba(38, 185, 154, 0.59) !important;
+ border-color: rgba(38, 185, 154, 0.59) !important;
+}
+
+button.DTTT_button,
+div.DTTT_button,
+a.DTTT_button {
+ border: 1px solid #E7E7E7 !important;
+ background: #E7E7E7 !important;
+ box-shadow: none !important;
+}
+
+table.jambo_table {
+ border: 1px solid rgba(221, 221, 221, 0.78);
+}
+
+table.jambo_table thead {
+ background: rgba(52, 73, 94, 0.94);
+ color: #ECF0F1;
+}
+
+table.jambo_table tbody tr:hover td {
+ background: rgba(38, 185, 154, 0.07);
+ border-top: 1px solid rgba(38, 185, 154, 0.11);
+ border-bottom: 1px solid rgba(38, 185, 154, 0.11);
+}
+
+table.jambo_table tbody tr.selected {
+ background: rgba(38, 185, 154, 0.16);
+}
+
+table.jambo_table tbody tr.selected td {
+ border-top: 1px solid rgba(38, 185, 154, 0.40);
+ border-bottom: 1px solid rgba(38, 185, 154, 0.40);
+}
+
+.dataTables_paginate a {
+ background: #ff0000;
+}
+
+.dataTables_wrapper {
+ position: relative;
+ clear: both;
+ zoom: 1;
+ /* Feeling sorry for IE */
+}
+
+.dataTables_processing {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ width: 250px;
+ height: 30px;
+ margin-left: -125px;
+ margin-top: -15px;
+ padding: 14px 0 2px 0;
+ border: 1px solid #ddd;
+ text-align: center;
+ color: #999;
+ font-size: 14px;
+ background-color: white;
+}
+
+.dataTables_length {
+ width: 40%;
+ float: left;
+}
+
+.dataTables_filter {
+ width: 50%;
+ float: right;
+ text-align: right;
+}
+
+.dataTables_info {
+ width: 60%;
+ float: left;
+}
+
+.dataTables_paginate {
+ float: right;
+ text-align: right;
+}
+/* Pagination nested */
+
+.paginate_disabled_previous,
+.paginate_enabled_previous,
+.paginate_disabled_next,
+.paginate_enabled_next {
+ height: 19px;
+ float: left;
+ cursor: pointer;
+ *cursor: hand;
+ color: #111 !important;
+}
+
+.paginate_disabled_previous:hover,
+.paginate_enabled_previous:hover,
+.paginate_disabled_next:hover,
+.paginate_enabled_next:hover {
+ text-decoration: none !important;
+}
+
+.paginate_disabled_previous:active,
+.paginate_enabled_previous:active,
+.paginate_disabled_next:active,
+.paginate_enabled_next:active {
+ outline: none;
+}
+
+.paginate_disabled_previous,
+.paginate_disabled_next {
+ color: #666 !important;
+}
+
+.paginate_disabled_previous,
+.paginate_enabled_previous {
+ padding-left: 23px;
+}
+
+.paginate_disabled_next,
+.paginate_enabled_next {
+ padding-right: 23px;
+ margin-left: 10px;
+}
+
+.paginate_disabled_previous {
+ background: url('../images/back_disabled.png') no-repeat top left;
+}
+
+.paginate_enabled_previous {
+ background: url('../images/back_enabled.png') no-repeat top left;
+}
+
+.paginate_enabled_previous:hover {
+ background: url('../images/back_enabled_hover.png') no-repeat top left;
+}
+
+.paginate_disabled_next {
+ background: url('../images/forward_disabled.png') no-repeat top right;
+}
+
+.paginate_enabled_next {
+ background: url('../images/forward_enabled.png') no-repeat top right;
+}
+
+.paginate_enabled_next:hover {
+ background: url('../images/forward_enabled_hover.png') no-repeat top right;
+}
+
+table.display {
+ margin: 0 auto;
+ clear: both;
+ width: 100%;
+}
+
+table.display thead th {
+ padding: 8px 18px 8px 10px;
+ border-bottom: 1px solid black;
+ font-weight: bold;
+ cursor: pointer;
+ cursor: hand;
+}
+
+table.display tfoot th {
+ padding: 3px 18px 3px 10px;
+ border-top: 1px solid black;
+ font-weight: bold;
+}
+
+table.display tr.heading2 td {
+ border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+ padding: 3px 10px;
+}
+
+table.display td.center {
+ text-align: center;
+}
+
+.sorting_asc {
+ background: url('../images/sort_asc.png') no-repeat center right;
+}
+
+.sorting_desc {
+ background: url('../images/sort_desc.png') no-repeat center right;
+}
+
+.sorting {
+ background: url('../images/sort_both.png') no-repeat center right;
+}
+
+.sorting_asc_disabled {
+ background: url('../images/sort_asc_disabled.png') no-repeat center right;
+}
+
+.sorting_desc_disabled {
+ background: url('../images/sort_desc_disabled.png') no-repeat center right;
+}
+
+table.display thead th:active,
+table.display thead td:active {
+ outline: none;
+}
+
+.dataTables_scroll {
+ clear: both;
+}
+
+.dataTables_scrollBody {
+ *margin-top: -1px;
+ -webkit-overflow-scrolling: touch;
+}
+
+.top,
+.bottom {}
+
+.top .dataTables_info {
+ float: none;
+}
+
+.clear {
+ clear: both;
+}
+
+.dataTables_empty {
+ text-align: center;
+}
+
+tfoot input {
+ margin: 0.5em 0;
+ width: 100%;
+ color: #444;
+}
+
+tfoot input.search_init {
+ color: #999;
+}
+
+td.group {
+ background-color: #d1cfd0;
+ border-bottom: 2px solid #A19B9E;
+ border-top: 2px solid #A19B9E;
+}
+
+td.details {
+ background-color: #d1cfd0;
+ border: 2px solid #A19B9E;
+}
+
+.example_alt_pagination div.dataTables_info {
+ width: 40%;
+}
+
+.paging_full_numbers {
+ width: 400px;
+ height: 22px;
+ line-height: 22px;
+}
+
+.paging_full_numbers a:active {
+ outline: none
+}
+
+.paging_full_numbers a:hover {
+ text-decoration: none;
+}
+
+.paging_full_numbers a.paginate_button,
+.paging_full_numbers a.paginate_active {
+ border: 1px solid #aaa;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ padding: 2px 5px;
+ margin: 0 3px;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+.paging_full_numbers a.paginate_button {
+ background-color: #ddd;
+}
+
+.paging_full_numbers a.paginate_button:hover {
+ background-color: #ccc;
+ text-decoration: none !important;
+}
+
+.paging_full_numbers a.paginate_active {
+ background-color: #99B3FF;
+}
+
+table.display tr.even.row_selected td {
+ background-color: #B0BED9;
+}
+
+table.display tr.odd.row_selected td {
+ background-color: #9FAFD1;
+}
+
+div.box {
+ height: 100px;
+ padding: 10px;
+ overflow: auto;
+ border: 1px solid #8080FF;
+ background-color: #E5E5FF;
+}
+/* ********* /tables styling ******************************* */
+/* ********* /dashboard widget **************************** */
+/* ********* widgets *************************************** */
+
+ul.msg_list li {
+ background: #f7f7f7;
+ padding: 5px;
+ display: flex;
+ margin: 6px 6px 0;
+ width: 96% !important;
+}
+
+ul.msg_list li:last-child {
+ margin-bottom: 6px;
+ padding: 10px;
+}
+
+ul.msg_list li a {
+ padding: 3px 5px !important;
+}
+
+ul.msg_list li a .image img {
+ border-radius: 2px 2px 2px 2px;
+ -webkit-border-radius: 2px 2px 2px 2px;
+ float: left;
+ margin-right: 10px;
+ width: 11%;
+}
+
+ul.msg_list li a .time {
+ font-size: 11px;
+ font-style: italic;
+ font-weight: bold;
+ position: absolute;
+ right: 35px;
+}
+
+ul.msg_list li a .message {
+ display: block !important;
+ font-size: 11px;
+}
+
+.dropdown-menu.msg_list {
+ width: 300px !important;
+}
+
+.dropdown-menu.msg_list span {
+ white-space: normal;
+}
+
+.dropdown-menu {
+ border: medium none;
+ box-shadow: 0 0 3px;
+ display: none;
+ float: left;
+ font-size: 12px;
+ left: 0;
+ list-style: none outside none;
+ padding: 0;
+ position: absolute;
+ text-shadow: none;
+ top: 100%;
+ z-index: 1000;
+ border-radius: 2px;
+ border-top: 1px solid #999999;
+}
+
+.dropdown-menu:after {
+ border-bottom: 6px solid #999999;
+ border-left: 6px solid transparent;
+ border-right: 6px solid transparent;
+ content: "";
+ display: inline-block;
+ right: 6%;
+ position: absolute;
+ top: -6px;
+}
+
+.dropdown-menu>li>a {
+ color: #5A738E;
+}
+
+.navbar-nav .open .dropdown-menu {
+ position: absolute;
+ background: #fff;
+ margin-top: 0;
+ border: 1px solid #D9DEE4;
+ -webkit-box-shadow: none;
+}
+
+.info-number .badge {
+ font-size: 10px;
+ font-weight: normal;
+ line-height: 13px;
+ padding: 2px 6px;
+ position: absolute;
+ right: 2px;
+ top: 8px;
+}
+
+ul.to_do {
+ padding: 0;
+}
+
+ul.to_do li {
+ background: #f3f3f3;
+ -webkit-border-radius: 3px;
+ -moz-border-radius: 3px;
+ border-radius: 3px;
+ position: relative;
+ padding: 7px;
+ margin-bottom: 5px;
+ list-style: none;
+}
+
+ul.to_do p {
+ margin: 0;
+}
+
+.dashboard-widget {
+ background: #f6f6f6;
+ border-top: 5px solid #79C3DF;
+ border-radius: 3px;
+ padding: 5px 10px 10px;
+}
+
+.dashboard-widget .dashboard-widget-title {
+ font-weight: normal;
+ border-bottom: 1px solid #c1cdcd;
+ margin: 0 0 10px 0;
+ padding-bottom: 5px;
+ padding-left: 40px;
+ line-height: 30px;
+}
+
+.dashboard-widget .dashboard-widget-title i {
+ font-size: 100%;
+ margin-left: -35px;
+ margin-right: 10px;
+ color: #33a1c9;
+ padding: 3px 6px;
+ border: 1px solid #abd9ea;
+ border-radius: 5px;
+ background: #fff;
+}
+
+ul.quick-list {
+ width: 45%;
+ padding-left: 0;
+ display: inline-block;
+}
+
+ul.quick-list li {
+ padding-left: 10px;
+ list-style: none;
+ margin: 0;
+ padding-bottom: 6px;
+ padding-top: 4px;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+ overflow: hidden;
+}
+
+ul.quick-list li i {
+ padding-right: 10px;
+ color: #757679;
+}
+
+.dashboard-widget-content {
+ padding-top: 9px;
+}
+
+.dashboard-widget-content .sidebar-widget {
+ width: 50%;
+ display: inline-block;
+ vertical-align: top;
+ background: #fff;
+ border: 1px solid #abd9ea;
+ border-radius: 5px;
+ text-align: center;
+ float: right;
+ padding: 2px;
+ margin-top: 10px;
+}
+
+.widget_summary {
+ width: 100%;
+ display: inline-flex;
+}
+
+.widget_summary .w_left {
+ float: left;
+ text-align: left;
+}
+
+.widget_summary .w_center {
+ float: left;
+}
+
+.widget_summary .w_right {
+ float: left;
+ text-align: right;
+}
+
+.widget_summary .w_right span {
+ font-size: 20px;
+}
+
+.w_20 {
+ width: 20%
+}
+
+.w_25 {
+ width: 25%
+}
+
+.w_55 {
+ width: 55%
+}
+
+h5.graph_title {
+ text-align: left;
+ margin-left: 10px
+}
+
+h5.graph_title i {
+ margin-right: 10px;
+ font-size: 17px
+}
+
+span.right {
+ float: right;
+ font-size: 14px !important
+}
+
+.tile_info a {
+ text-overflow: ellipsis;
+}
+
+.sidebar-footer {
+ bottom: 0px;
+ clear: both;
+ display: block;
+ padding: 5px 0 0 0;
+ position: fixed;
+ width: 230px;
+ z-index: 1000;
+ background: #2A3F54;
+}
+
+.sidebar-footer a {
+ padding: 7px 0 3px;
+ text-align: center;
+ width: 25%;
+ font-size: 17px;
+ display: block;
+ float: left;
+ background: #172D44;
+}
+
+.sidebar-footer a:hover {
+ background: #425567;
+}
+
+.tile_count {
+ margin-bottom: 20px;
+ margin-top: 20px;
+}
+
+.tile_count div:first-child .left {
+ border: 0;
+}
+
+.tile_count .tile_stats_count {
+ border-left: 0px solid #333;
+ padding: 0;
+}
+
+.tile_stats_count .left {
+ width: 15%;
+ float: left;
+ height: 65px;
+ border-left: 2px solid #ADB2B5;
+ margin-top: 10px;
+}
+
+.tile_stats_count .right {
+ padding-left: 10px;
+ height: 100%;
+ text-overflow: ellipsis;
+ overflow: hidden;
+}
+
+.tile_stats_count .right span {
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.tile_stats_count .count {
+ font-size: 40px;
+ line-height: 47px;
+ font-weight: 600;
+}
+
+.tile_stats_count .count small {
+ font-size: 20px;
+ line-height: 20px;
+ font-weight: 600;
+}
+
+.count_bottom i {
+ width: 12px;
+}
+
+.dashboard_graph {
+ background: #fff;
+ padding: 7px 10px;
+}
+
+.dashboard_graph .col-md-9,
+.dashboard_graph .col-md-3 {
+ padding: 0;
+}
+
+a.user-profile {
+ color: #5E6974 !important;
+}
+
+.user-profile img {
+ width: 29px;
+ height: 29px;
+ border-radius: 50%;
+ margin-right: 10px;
+}
+
+ul.top_profiles {
+ height: 330px;
+ width: 100%;
+}
+
+ul.top_profiles li {
+ margin: 0;
+ padding: 3px 5px;
+}
+
+ul.top_profiles li:nth-child(odd) {
+ background-color: #eee;
+}
+
+.media .profile_thumb {
+ border: 1px solid;
+ width: 50px;
+ height: 50px;
+ margin: 5px 10px 5px 0;
+ border-radius: 50%;
+ padding: 9px 12px;
+}
+
+.media .profile_thumb i {
+ font-size: 30px;
+}
+
+.media .date {
+ background: #ccc;
+ width: 52px;
+ margin-right: 10px;
+ border-radius: 10px;
+ padding: 5px;
+}
+
+.media .date .month {
+ margin: 0;
+ text-align: center;
+ color: #fff;
+}
+
+.media .date .day {
+ text-align: center;
+ color: #fff;
+ font-size: 27px;
+ margin: 0;
+ line-height: 27px;
+ font-weight: bold;
+}
+
+.event .media-body a.title {
+ font-weight: bold;
+}
+
+.event .media-body p {
+ margin-bottom: 0;
+}
+
+h4.graph_title {
+ margin: 7px;
+ text-align: center;
+}
+/* ********* /widgets *************************************** */
+/* ********* icons-display ***************************** */
+
+.fontawesome-icon-list .fa-hover a:hover {
+ background-color: #ddd;
+ color: #fff;
+ text-decoration: none;
+}
+
+.fontawesome-icon-list .fa-hover a {
+ display: block;
+ line-height: 32px;
+ height: 32px;
+ padding-left: 10px;
+ border-radius: 4px;
+}
+
+.fontawesome-icon-list .fa-hover a:hover .fa {
+ font-size: 28px;
+ vertical-align: -6px;
+}
+
+.fontawesome-icon-list .fa-hover a .fa {
+ width: 32px;
+ font-size: 16px;
+ display: inline-block;
+ text-align: right;
+ margin-right: 10px;
+}
+
+#sidebar-menu .fa {
+ width: 26px;
+ opacity: .99;
+ display: inline-block;
+ font-family: FontAwesome;
+ font-style: normal;
+ font-weight: normal;
+ font-size: 18px;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+/* ********* /icons-display ***************************** */
+/* ********* Tile stats **************************** */
+
+.tile-stats {
+ position: relative;
+ display: block;
+ margin-bottom: 12px;
+ border: 1px solid #E4E4E4;
+ -webkit-border-radius: 5px;
+ overflow: hidden;
+ padding-bottom: 5px;
+ -webkit-background-clip: padding-box;
+ -moz-border-radius: 5px;
+ -moz-background-clip: padding;
+ border-radius: 5px;
+ background-clip: padding-box;
+ background: #FFF;
+ -moz-transition: all 300ms ease-in-out;
+ -o-transition: all 300ms ease-in-out;
+ -webkit-transition: all 300ms ease-in-out;
+ transition: all 300ms ease-in-out;
+}
+
+.tile-stats:hover .icon i {
+ animation-name: tansformAnimation;
+ animation-duration: .5s;
+ animation-iteration-count: 1;
+ color: rgba(58, 58, 58, 0.41);
+ animation-timing-function: ease;
+ animation-fill-mode: forwards;
+ -webkit-animation-name: tansformAnimation;
+ -webkit-animation-duration: .5s;
+ -webkit-animation-iteration-count: 1;
+ -webkit-animation-timing-function: ease;
+ -webkit-animation-fill-mode: forwards;
+ -moz-animation-name: tansformAnimation;
+ -moz-animation-duration: .5s;
+ -moz-animation-iteration-count: 1;
+ -moz-animation-timing-function: ease;
+ -moz-animation-fill-mode: forwards;
+}
+
+.tile-stats .icon {
+ color: #BAB8B8;
+ position: absolute;
+ right: 53px;
+ top: 22px;
+ z-index: 1;
+}
+
+.tile-stats .icon i {
+ margin: 0;
+ font-size: 60px;
+ line-height: 0;
+ vertical-align: bottom;
+ padding: 0;
+}
+
+.tile-stats .count {
+ font-size: 38px;
+ font-weight: bold;
+ line-height: 1.65857143
+}
+
+.tile-stats .count,
+.tile-stats h3,
+.tile-stats p {
+ position: relative;
+ margin: 0;
+ margin-left: 10px;
+ z-index: 5;
+ padding: 0;
+}
+
+.tile-stats h3 {
+ color: #BAB8B8;
+}
+
+.tile-stats p {
+ margin-top: 5px;
+ font-size: 12px;
+}
+
+.tile-stats > .dash-box-footer {
+ position: relative;
+ text-align: center;
+ margin-top: 5px;
+ padding: 3px 0;
+ color: #fff;
+ color: rgba(255, 255, 255, 0.8);
+ display: block;
+ z-index: 10;
+ background: rgba(0, 0, 0, 0.1);
+ text-decoration: none;
+}
+
+.tile-stats > .dash-box-footer:hover {
+ color: #fff;
+ background: rgba(0, 0, 0, 0.15);
+}
+
+table.tile_info {
+ padding: 10px 15px;
+}
+
+table.tile_info span.right {
+ margin-right: 0;
+ float: right;
+ position: absolute;
+ right: 4%;
+}
+
+.tile:hover {
+ text-decoration: none;
+}
+
+.tile_header {
+ border-bottom: transparent;
+ padding: 7px 15px;
+ margin-bottom: 15px;
+ background: #E7E7E7;
+}
+
+.tile_head h4 {
+ margin-top: 0;
+ margin-bottom: 5px;
+}
+
+.tiles-bottom {
+ padding: 5px 10px;
+ margin-top: 10px;
+ background: rgba(194, 194, 194, 0.3);
+ text-align: left;
+}
+/* ********* /Tile stats **************************** */
+/* ********* inbox design ***************************** */
+
+a.star {
+ color: #428bca !important
+}
+
+.mail_content {
+ background: none repeat scroll 0 0 #FFFFFF;
+ border-radius: 4px;
+ margin-top: 20px;
+ min-height: 500px;
+ padding: 10px 11px;
+ width: 100%;
+}
+
+.list-btn-mail {
+ margin-bottom: 15px;
+}
+
+.list-btn-mail.active {
+ border-bottom: 1px solid #39B3D7;
+ padding: 0 0 14px;
+}
+
+.list-btn-mail > i {
+ float: left;
+ font-size: 18px;
+ font-style: normal;
+ width: 33px;
+}
+
+.list-btn-mail > .cn {
+ background: none repeat scroll 0 0 #39B3D7;
+ border-radius: 12px;
+ color: #FFFFFF;
+ float: right;
+ font-style: normal;
+ padding: 0 5px;
+}
+
+.button-mail {
+ margin: 0 0 15px !important;
+ text-align: left;
+ width: 100%;
+}
+
+.buttons,
+button,
+.btn {
+ margin-bottom: 5px;
+ margin-right: 5px;
+}
+
+.btn-group-vertical .btn,
+.btn-group .btn {
+ margin-bottom: 0;
+ margin-right: 0;
+}
+
+.mail_list_column {
+ border-left: 1px solid #DBDBDB;
+}
+
+.mail_view {
+ border-left: 1px solid #DBDBDB
+}
+
+.mail_list {
+ width: 100%;
+ border-bottom: 1px solid #DBDBDB;
+ margin-bottom: 2px;
+ display: inline-block;
+}
+
+.mail_list .left {
+ width: 5%;
+ float: left;
+ margin-right: 3%
+}
+
+.mail_list .right {
+ width: 90%;
+ float: left
+}
+
+.mail_list h3 {
+ font-size: 15px;
+ font-weight: bold;
+ margin: 0px 0 6px;
+}
+
+.mail_list h3 small {
+ float: right;
+ color: #ADABAB;
+ font-size: 11px;
+ line-height: 20px;
+}
+
+.mail_list .badge {
+ padding: 3px 6px;
+ font-size: 8px;
+ background: #BAB7B7
+}
+
+@media (max-width: 767px) {
+ .mail_list {
+ margin-bottom: 5px;
+ display: inline-block;
+ }
+}
+
+.mail_heading h4 {
+ font-size: 18px;
+ border-bottom: 1px solid #ddd;
+ padding-bottom: 10px;
+ margin-top: 20px;
+}
+
+.attachment {
+ margin-top: 30px;
+}
+
+.attachment ul {
+ width: 100%;
+ list-style: none;
+ padding-left: 0;
+ display: inline-block;
+ margin-bottom: 30px;
+}
+
+.attachment ul li {
+ float: left;
+ width: 150px;
+ margin-right: 10px;
+ margin-bottom: 10px;
+}
+
+.attachment ul li img {
+ height: 150px;
+ border: 1px solid #ddd;
+ padding: 5px;
+ margin-bottom: 10px;
+}
+
+.attachment ul li span {
+ float: right;
+}
+
+.attachment .file-name {
+ float: left;
+}
+
+.attachment .links {
+ width: 100%;
+ display: inline-block;
+}
+/* ********* /inbox design **************************** */
+/* ********* form design **************************** */
+
+.editor.btn-toolbar {
+ zoom: 1;
+ background: #F7F7F7;
+ margin: 5px 2px;
+ padding: 3px 0;
+ border: 1px solid #EFEFEF;
+}
+
+.input-group {
+ margin-bottom: 10px;
+}
+
+.ln_solid {
+ border-top: 1px solid #e5e5e5;
+ color: #ffffff;
+ background-color: #ffffff;
+ height: 1px;
+ margin: 20px 0;
+}
+
+span.section {
+ display: block;
+ width: 100%;
+ padding: 0;
+ margin-bottom: 20px;
+ font-size: 21px;
+ line-height: inherit;
+ color: #333;
+ border: 0;
+ border-bottom: 1px solid #e5e5e5;
+}
+
+.form-control {
+ border-radius: 0;
+ line-height: 30px;
+ width: 100%;
+}
+
+.form-horizontal .control-label {
+ padding-top: 8px
+}
+
+.form-control:focus {
+ border-color: #CCD0D7;
+ box-shadow: none !important;
+}
+
+legend {
+ font-size: 18px;
+ color: inherit;
+}
+
+.checkbox {}
+
+.form-horizontal .form-group {
+ margin-right: 0;
+ margin-left: 0;
+}
+
+.form-control-feedback {
+ margin-top: 8px;
+ height: 23px;
+ color: #bbb;
+ line-height: 24px;
+ font-size: 15px;
+}
+
+.form-control-feedback.left {
+ border-right: 1px solid #ccc;
+ left: 13px;
+}
+
+.form-control-feedback.right {
+ border-left: 1px solid #ccc;
+ right: 13px;
+}
+
+.form-control.has-feedback-left {
+ padding-left: 45px;
+}
+
+.form-control.has-feedback-right {
+ padding-right: 45px;
+}
+
+.form-group {
+ margin-bottom: 10px;
+}
+
+.validate {
+ margin-top: 10px;
+}
+
+.invalid-form-error-message {
+ margin-top: 10px;
+ padding: 5px;
+}
+
+.invalid-form-error-message.filled {
+ border-left: 2px solid #E74C3C;
+}
+
+p.parsley-success {
+ color: #468847;
+ background-color: #DFF0D8;
+ border: 1px solid #D6E9C6;
+}
+
+p.parsley-error {
+ color: #B94A48;
+ background-color: #F2DEDE;
+ border: 1px solid #EED3D7;
+}
+
+ul.parsley-errors-list {
+ list-style: none;
+ color: #E74C3C;
+ padding-left: 0;
+}
+
+input.parsley-error,
+textarea.parsley-error,
+select.parsley-error {
+ background: #FAEDEC;
+ border: 1px solid #E85445;
+}
+
+.btn-group .parsley-errors-list {
+ display: none;
+}
+
+.bad input,
+.bad select,
+.bad textarea {
+ border: 1px solid #CE5454;
+ box-shadow: 0 0 4px -2px #CE5454;
+ position: relative;
+ left: 0;
+ -moz-animation: .7s 1 shake linear;
+ -webkit-animation: 0.7s 1 shake linear;
+}
+
+.item input,
+.item textarea {
+ -webkit-transition: 0.42s;
+ -moz-transition: 0.42s;
+ transition: 0.42s;
+}
+/* alerts (when validation fails) */
+
+.item .alert {
+ float: left;
+ margin: 0 0 0 20px;
+ padding: 3px 10px;
+ color: #FFF;
+ border-radius: 3px 4px 4px 3px;
+ background-color: #CE5454;
+ max-width: 170px;
+ white-space: pre;
+ position: relative;
+ left: -15px;
+ opacity: 0;
+ z-index: 1;
+ transition: 0.15s ease-out;
+}
+
+.item .alert::after {
+ content: '';
+ display: block;
+ height: 0;
+ width: 0;
+ border-color: transparent #CE5454 transparent transparent;
+ border-style: solid;
+ border-width: 11px 7px;
+ position: absolute;
+ left: -13px;
+ top: 1px;
+}
+
+.item.bad .alert {
+ left: 0;
+ opacity: 1;
+}
+/* ***** dropzone ****** */
+
+.dropzone,
+.dropzone * {
+ box-sizing: border-box;
+}
+
+.dropzone {
+ min-height: 150px;
+ border: 2px solid rgba(0, 0, 0, 0.3);
+ background: white;
+ padding: 54px 54px;
+}
+
+.dropzone.dz-clickable {
+ cursor: pointer;
+}
+
+.dropzone.dz-clickable * {
+ cursor: default;
+}
+
+.dropzone.dz-clickable .dz-message,
+.dropzone.dz-clickable .dz-message * {
+ cursor: pointer;
+}
+
+.dropzone.dz-started .dz-message {
+ display: none;
+}
+
+.dropzone.dz-drag-hover {
+ border-style: solid;
+}
+
+.dropzone.dz-drag-hover .dz-message {
+ opacity: 0.5;
+}
+
+.dropzone .dz-message {
+ text-align: center;
+ margin: 2em 0;
+}
+
+.dropzone .dz-preview {
+ position: relative;
+ display: inline-block;
+ vertical-align: top;
+ margin: 16px;
+ min-height: 100px;
+}
+
+.dropzone .dz-preview:hover {
+ z-index: 1000;
+}
+
+.dropzone .dz-preview:hover .dz-details {
+ opacity: 1;
+}
+
+.dropzone .dz-preview.dz-file-preview .dz-image {
+ border-radius: 20px;
+ background: #999;
+ background: linear-gradient(to bottom, #eee, #ddd);
+}
+
+.dropzone .dz-preview.dz-file-preview .dz-details {
+ opacity: 1;
+}
+
+.dropzone .dz-preview.dz-image-preview {
+ background: white;
+}
+
+.dropzone .dz-preview.dz-image-preview .dz-details {
+ -webkit-transition: opacity 0.2s linear;
+ -moz-transition: opacity 0.2s linear;
+ -ms-transition: opacity 0.2s linear;
+ -o-transition: opacity 0.2s linear;
+ transition: opacity 0.2s linear;
+}
+
+.dropzone .dz-preview .dz-remove {
+ font-size: 14px;
+ text-align: center;
+ display: block;
+ cursor: pointer;
+ border: none;
+}
+
+.dropzone .dz-preview .dz-remove:hover {
+ text-decoration: underline;
+}
+
+.dropzone .dz-preview .dz-details {
+ z-index: 20;
+ position: absolute;
+ top: 0;
+ left: 0;
+ opacity: 0;
+ font-size: 13px;
+ min-width: 100%;
+ max-width: 100%;
+ padding: 2em 1em;
+ text-align: center;
+ color: rgba(0, 0, 0, 0.9);
+ line-height: 150%;
+}
+
+.dropzone .dz-preview .dz-details .dz-size {
+ margin-bottom: 1em;
+ font-size: 16px;
+}
+
+.dropzone .dz-preview .dz-details .dz-filename {
+ white-space: nowrap;
+}
+
+.dropzone .dz-preview .dz-details .dz-filename:hover span {
+ border: 1px solid rgba(200, 200, 200, 0.8);
+ background-color: rgba(255, 255, 255, 0.8);
+}
+
+.dropzone .dz-preview .dz-details .dz-filename:not(:hover) {
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.dropzone .dz-preview .dz-details .dz-filename:not(:hover) span {
+ border: 1px solid transparent;
+}
+
+.dropzone .dz-preview .dz-details .dz-filename span,
+.dropzone .dz-preview .dz-details .dz-size span {
+ background-color: rgba(255, 255, 255, 0.4);
+ padding: 0 0.4em;
+ border-radius: 3px;
+}
+
+.dropzone .dz-preview:hover .dz-image img {
+ -webkit-transform: scale(1.05, 1.05);
+ -moz-transform: scale(1.05, 1.05);
+ -ms-transform: scale(1.05, 1.05);
+ -o-transform: scale(1.05, 1.05);
+ transform: scale(1.05, 1.05);
+ -webkit-filter: blur(8px);
+ filter: blur(8px);
+}
+
+.dropzone .dz-preview .dz-image {
+ border-radius: 20px;
+ overflow: hidden;
+ width: 120px;
+ height: 120px;
+ position: relative;
+ display: block;
+ z-index: 10;
+}
+
+.dropzone .dz-preview .dz-image img {
+ display: block;
+}
+
+.dropzone .dz-preview.dz-success .dz-success-mark {
+ -webkit-animation: passing-through 3s cubic-bezier(0.77, 0, 0.175, 1);
+ -moz-animation: passing-through 3s cubic-bezier(0.77, 0, 0.175, 1);
+ -ms-animation: passing-through 3s cubic-bezier(0.77, 0, 0.175, 1);
+ -o-animation: passing-through 3s cubic-bezier(0.77, 0, 0.175, 1);
+ animation: passing-through 3s cubic-bezier(0.77, 0, 0.175, 1);
+}
+
+.dropzone .dz-preview.dz-error .dz-error-mark {
+ opacity: 1;
+ -webkit-animation: slide-in 3s cubic-bezier(0.77, 0, 0.175, 1);
+ -moz-animation: slide-in 3s cubic-bezier(0.77, 0, 0.175, 1);
+ -ms-animation: slide-in 3s cubic-bezier(0.77, 0, 0.175, 1);
+ -o-animation: slide-in 3s cubic-bezier(0.77, 0, 0.175, 1);
+ animation: slide-in 3s cubic-bezier(0.77, 0, 0.175, 1);
+}
+
+.dropzone .dz-preview .dz-success-mark,
+.dropzone .dz-preview .dz-error-mark {
+ pointer-events: none;
+ opacity: 0;
+ z-index: 500;
+ position: absolute;
+ display: block;
+ top: 50%;
+ left: 50%;
+ margin-left: -27px;
+ margin-top: -27px;
+}
+
+.dropzone .dz-preview .dz-success-mark svg,
+.dropzone .dz-preview .dz-error-mark svg {
+ display: block;
+ width: 54px;
+ height: 54px;
+}
+
+.dropzone .dz-preview.dz-processing .dz-progress {
+ opacity: 1;
+ -webkit-transition: all 0.2s linear;
+ -moz-transition: all 0.2s linear;
+ -ms-transition: all 0.2s linear;
+ -o-transition: all 0.2s linear;
+ transition: all 0.2s linear;
+}
+
+.dropzone .dz-preview.dz-complete .dz-progress {
+ opacity: 0;
+ -webkit-transition: opacity 0.4s ease-in;
+ -moz-transition: opacity 0.4s ease-in;
+ -ms-transition: opacity 0.4s ease-in;
+ -o-transition: opacity 0.4s ease-in;
+ transition: opacity 0.4s ease-in;
+}
+
+.dropzone .dz-preview:not(.dz-processing) .dz-progress {
+ -webkit-animation: pulse 6s ease infinite;
+ -moz-animation: pulse 6s ease infinite;
+ -ms-animation: pulse 6s ease infinite;
+ -o-animation: pulse 6s ease infinite;
+ animation: pulse 6s ease infinite;
+}
+
+.dropzone .dz-preview .dz-progress {
+ opacity: 1;
+ z-index: 1000;
+ pointer-events: none;
+ position: absolute;
+ height: 16px;
+ left: 50%;
+ top: 50%;
+ margin-top: -8px;
+ width: 80px;
+ margin-left: -40px;
+ background: rgba(255, 255, 255, 0.9);
+ -webkit-transform: scale(1);
+ border-radius: 8px;
+ overflow: hidden;
+}
+
+.dropzone .dz-preview .dz-progress .dz-upload {
+ background: #333;
+ background: linear-gradient(to bottom, #666, #444);
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ width: 0;
+ -webkit-transition: width 300ms ease-in-out;
+ -moz-transition: width 300ms ease-in-out;
+ -ms-transition: width 300ms ease-in-out;
+ -o-transition: width 300ms ease-in-out;
+ transition: width 300ms ease-in-out;
+}
+
+.dropzone .dz-preview.dz-error .dz-error-message {
+ display: block;
+}
+
+.dropzone .dz-preview.dz-error:hover .dz-error-message {
+ opacity: 1;
+ pointer-events: auto;
+}
+
+.dropzone .dz-preview .dz-error-message {
+ pointer-events: none;
+ z-index: 1000;
+ position: absolute;
+ display: none;
+ opacity: 0;
+ -webkit-transition: opacity 0.3s ease;
+ -moz-transition: opacity 0.3s ease;
+ -ms-transition: opacity 0.3s ease;
+ -o-transition: opacity 0.3s ease;
+ transition: opacity 0.3s ease;
+ border-radius: 8px;
+ font-size: 13px;
+ top: 130px;
+ left: -10px;
+ width: 140px;
+ background: #be2626;
+ background: linear-gradient(to bottom, #be2626, #a92222);
+ padding: 0.5em 1.2em;
+ color: white;
+}
+
+.dropzone .dz-preview .dz-error-message:after {
+ content: '';
+ position: absolute;
+ top: -6px;
+ left: 64px;
+ width: 0;
+ height: 0;
+ border-left: 6px solid transparent;
+ border-right: 6px solid transparent;
+ border-bottom: 6px solid #be2626;
+}
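+/* Note: the .dz-* classes above are added at runtime by Dropzone.js and do
+   not need to appear in the markup. The library auto-attaches to any form
+   carrying the dropzone class, e.g. (the upload URL is a placeholder):
+
+   <form action="/upload" class="dropzone"></form>
+*/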
+/* ***** /dropzone ****** */
+.inl-bl{
+ display:inline-block;
+}
+
+.well .markup-heading {}
+.well .markup{
+ background: #fff;
+ color: #777;
+ position: relative;
+ padding: 45px 15px 15px;
+ margin: 15px 0 0 0;
+ background-color: #fff;
+ border-radius: 0 0 4px 4px;
+ box-shadow: none;
+}
+
+.well .markup::after{
+ content: "Example";
+ position: absolute;
+ top: 15px;
+ left: 15px;
+ font-size: 12px;
+ font-weight: bold;
+ color: #bbb;
+ text-transform: uppercase;
+ letter-spacing: 1px;
+}
+/* ***** autocomplete ***** */
+
+.autocomplete-suggestions {
+ border: 1px solid #e4e4e4;
+ background: #F4F4F4;
+ cursor: default;
+ overflow: auto;
+}
+
+.autocomplete-suggestion {
+ padding: 2px 5px;
+ font-size: 1.2em;
+ white-space: nowrap;
+ overflow: hidden;
+}
+
+.autocomplete-selected {
+ background: #f0f0f0;
+}
+
+.autocomplete-suggestions strong {
+ color: #3399ff;
+ font-weight: bolder;
+}
+/* ***** /autocomplete *****/
+/* ***** buttons ********/
+
+.btn.btn-app {
+ position: relative;
+ padding: 15px 5px;
+ margin: 0 0 10px 10px;
+ min-width: 80px;
+ height: 60px;
+ -webkit-box-shadow: none;
+ -moz-box-shadow: none;
+ box-shadow: none;
+ -webkit-border-radius: 0;
+ -moz-border-radius: 0;
+ border-radius: 0;
+ text-align: center;
+ color: #666;
+ border: 1px solid #ddd;
+ background-color: #fafafa;
+ font-size: 12px;
+}
+
+.btn.btn-app > .fa,
+.btn.btn-app > .glyphicon,
+.btn.btn-app > .ion {
+ font-size: 20px;
+ display: block;
+}
+
+.btn.btn-app:hover {
+ background: #f4f4f4;
+ color: #444;
+ border-color: #aaa;
+}
+
+.btn.btn-app:active,
+.btn.btn-app:focus {
+ -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+ -moz-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+ box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
+}
+
+.btn.btn-app > .badge {
+ position: absolute;
+ top: -3px;
+ right: -10px;
+ font-size: 10px;
+ font-weight: 400;
+}
+/* ***** /buttons *******/
+/* ********* /form design **************************** */
+/* ********* calendar dropdown **************************** */
+
+.daterangepicker.dropdown-menu {
+ font-size: 13px;
+ padding: 0;
+ overflow: hidden;
+}
+
+.daterangepicker.picker_1 {
+ background: #34495E;
+ color: #ECF0F1;
+}
+
+.daterangepicker.picker_1 table.table-condensed thead tr:first-child {
+ background: #1ABB9C;
+}
+
+.daterangepicker table.table-condensed thead tr:first-child th {
+ line-height: 28px;
+ text-align: center;
+}
+
+.daterangepicker.picker_1 table.table-condensed thead tr {
+ background: #213345;
+}
+
+.daterangepicker table.table-condensed thead tr {
+ line-height: 14px;
+}
+
+.daterangepicker table.table-condensed tbody tr:first-child td {
+ padding-top: 10px;
+}
+
+.daterangepicker table.table-condensed th:first-child,
+.daterangepicker table.table-condensed td:first-child {
+ padding-left: 12px
+}
+
+.daterangepicker table.table-condensed th:last-child,
+.daterangepicker table.table-condensed td:last-child {
+ padding-right: 12px
+}
+
+.table-condensed>thead>tr>th,
+.table-condensed>tbody>tr>th,
+.table-condensed>tfoot>tr>th,
+.table-condensed>thead>tr>td,
+.table-condensed>tbody>tr>td,
+.table-condensed>tfoot>tr>td {
+ padding: 5px 7px;
+ text-align: center;
+}
+
+.daterangepicker table.table-condensed tbody tr:last-child td {
+ padding-bottom: 10px;
+}
+
+.daterangepicker.picker_2 table.table-condensed thead tr:first-child {
+ color: inherit;
+}
+
+.daterangepicker.picker_2 table.table-condensed thead tr {
+ color: #1ABB9C;
+}
+
+.daterangepicker.picker_3 table.table-condensed thead tr:first-child {
+ background: #1ABB9C;
+ color: #ECF0F1;
+}
+
+.daterangepicker.picker_4 table.table-condensed tbody td {
+ background: #ECF0F1;
+ color: #34495E;
+ border: 1px solid #fff;
+ padding: 4px 7px;
+}
+
+.daterangepicker.picker_4 table.table-condensed tbody td.active {
+ background: #536A7F;
+ color: #fff;
+}
+
+.daterangepicker.picker_4 table.table-condensed thead tr:first-child {
+ background: #34495E;
+ color: #ECF0F1;
+}
+
+.xdisplay_input {
+ width: 240px;
+ overflow: hidden;
+ padding: 0;
+}
+
+.xdisplay {
+ background-color: #fff;
+ -webkit-background-clip: padding-box;
+ background-clip: padding-box;
+ border: 1px solid #ccc;
+ margin-bottom: 20px;
+ border: 1px solid rgba(0, 0, 0, .15);
+ border-radius: 4px;
+ width: 230px;
+ overflow: hidden;
+ -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+ box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+}
+
+.daterangepicker.opensright .ranges,
+.daterangepicker.opensright .calendar,
+.daterangepicker.openscenter .ranges,
+.daterangepicker.openscenter .calendar {
+ float: right;
+}
+
+.daterangepicker.dropdown-menu .calendar {}
+
+.daterangepicker table {
+ width: 100%;
+ margin: 0;
+}
+
+.daterangepicker td,
+.daterangepicker th {
+ text-align: center;
+ width: 20px;
+ height: 20px;
+ cursor: pointer;
+ white-space: nowrap;
+}
+
+.daterangepicker td.off {
+ color: #999;
+}
+
+.daterangepicker td.disabled {
+ color: #999;
+}
+
+.daterangepicker td.available:hover,
+.daterangepicker th.available:hover {
+ background: #eee;
+ color: #34495E;
+}
+
+.daterangepicker td.in-range {
+ background: #E4E7EA;
+ -webkit-border-radius: 0;
+ -moz-border-radius: 0;
+ border-radius: 0;
+}
+
+.daterangepicker td.available + td.start-date {
+ -webkit-border-radius: 4px 0 0 4px;
+ -moz-border-radius: 4px 0 0 4px;
+ border-radius: 4px 0 0 4px;
+}
+
+.daterangepicker td.in-range + td.end-date {
+ -webkit-border-radius: 0 4px 4px 0;
+ -moz-border-radius: 0 4px 4px 0;
+ border-radius: 0 4px 4px 0;
+}
+
+.daterangepicker td.start-date.end-date {
+ -webkit-border-radius: 4px !important;
+ -moz-border-radius: 4px !important;
+ border-radius: 4px !important;
+}
+
+.daterangepicker td.active,
+.daterangepicker td.active:hover {
+ background-color: #536A7F;
+ color: #fff;
+}
+
+.daterangepicker td.week,
+.daterangepicker th.week {
+ font-size: 80%;
+ color: #ccc;
+}
+
+.daterangepicker select.monthselect,
+.daterangepicker select.yearselect {
+ font-size: 12px;
+ padding: 1px;
+ margin: 0;
+ cursor: default;
+ height: 30px;
+ border: 1px solid #ADB2B5;
+ line-height: 30px;
+ border-radius: 0 !important;
+}
+
+.daterangepicker select.monthselect {
+ margin-right: 2%;
+ width: 56%;
+}
+
+.daterangepicker select.yearselect {
+ width: 40%;
+}
+
+.daterangepicker select.hourselect,
+.daterangepicker select.minuteselect,
+.daterangepicker select.ampmselect {
+ width: 50px;
+ margin-bottom: 0;
+}
+
+.daterangepicker_start_input {
+ float: left;
+}
+
+.daterangepicker_end_input {
+ float: left;
+ padding-left: 11px;
+}
+
+.daterangepicker th.month {
+ width: auto;
+}
+
+
+.daterangepicker .daterangepicker_start_input label,
+.daterangepicker .daterangepicker_end_input label {
+ color: #333;
+ display: block;
+ font-size: 11px;
+ font-weight: normal;
+ height: 20px;
+ line-height: 20px;
+ margin-bottom: 2px;
+ text-shadow: #fff 1px 1px 0px;
+ text-transform: uppercase;
+ width: 74px;
+}
+
+.daterangepicker .ranges input {
+ font-size: 11px;
+}
+
+.daterangepicker .ranges .input-mini {
+ background-color: #eee;
+ border: 1px solid #ccc;
+ border-radius: 4px;
+ color: #555;
+ display: block;
+ font-size: 11px;
+ height: 30px;
+ line-height: 30px;
+ vertical-align: middle;
+ margin: 0 0 10px 0;
+ padding: 0 6px;
+ width: 74px;
+}
+
+.daterangepicker .ranges .input-mini:hover {
+ cursor: pointer;
+}
+
+.daterangepicker .ranges ul {
+ list-style: none;
+ margin: 0;
+ padding: 0;
+}
+
+.daterangepicker .ranges li {
+ font-size: 13px;
+ background: #f5f5f5;
+ border: 1px solid #f5f5f5;
+ color: #536A7F;
+ padding: 3px 12px;
+ margin-bottom: 8px;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+ cursor: pointer;
+}
+
+.daterangepicker .ranges li.active,
+.daterangepicker .ranges li:hover {
+ background: #536A7F;
+ border: 1px solid #536A7F;
+ color: #fff;
+}
+
+.daterangepicker .calendar {
+ display: none;
+ max-width: 270px;
+}
+
+.daterangepicker.show-calendar .calendar {
+ display: block;
+}
+
+.daterangepicker .calendar.single .calendar-date {
+ border: none;
+}
+
+
+.daterangepicker.single .ranges,
+.daterangepicker.single .calendar {
+ float: none;
+}
+
+.daterangepicker .ranges {
+ width: 160px;
+ text-align: left;
+ margin: 4px;
+}
+
+.daterangepicker .ranges .range_inputs>div {
+ float: left;
+}
+
+.daterangepicker .ranges .range_inputs>div:nth-child(2) {
+ padding-left: 11px;
+}
+
+.daterangepicker.opensleft .ranges,
+.daterangepicker.opensleft .calendar {
+ float: left;
+ margin: 4px;
+}
+/* ********* /calendar dropdown **************************** */
+/* ********* form textarea **************************** */
+textarea {
+ padding: 10px;
+ vertical-align: top;
+ width: 200px;
+}
+textarea:focus {
+ outline-style: solid;
+ outline-width: 2px;
+}
+.btn_ {
+ display: inline-block;
+ padding: 3px 9px;
+ margin-bottom: 0;
+ font-size: 14px;
+ line-height: 20px;
+ text-align: center;
+ vertical-align: middle;
+ cursor: pointer;
+ color: #333333;
+ text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75);
+ background-color: #f5f5f5;
+ background-image: -moz-linear-gradient(top, #ffffff, #e6e6e6);
+ background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), to(#e6e6e6));
+ background-image: -webkit-linear-gradient(top, #ffffff, #e6e6e6);
+ background-image: -o-linear-gradient(top, #ffffff, #e6e6e6);
+ background-image: linear-gradient(to bottom, #ffffff, #e6e6e6);
+ background-repeat: repeat-x;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe6e6e6', GradientType=0);
+ border-color: #e6e6e6 #e6e6e6 #bfbfbf;
+ border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
+ filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
+ border: 1px solid #cccccc;
+ border-bottom-color: #b3b3b3;
+ -webkit-border-radius: 4px;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .2), 0 1px 2px rgba(0, 0, 0, .05);
+ -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .2), 0 1px 2px rgba(0, 0, 0, .05);
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, .2), 0 1px 2px rgba(0, 0, 0, .05);
+}
+/* ********* /form textarea **************************** */
+/* ********* glyphicons **************************** */
+
+.bs-glyphicons {
+ margin: 0 -10px 20px;
+ overflow: hidden
+}
+
+.bs-glyphicons-list {
+ padding-left: 0;
+ list-style: none
+}
+
+.bs-glyphicons li {
+ float: left;
+ width: 25%;
+ height: 115px;
+ padding: 10px;
+ font-size: 10px;
+ line-height: 1.4;
+ text-align: center;
+ background-color: #f9f9f9;
+ border: 1px solid #fff
+}
+
+.bs-glyphicons .glyphicon {
+ margin-top: 5px;
+ margin-bottom: 10px;
+ font-size: 24px
+}
+
+.bs-glyphicons .glyphicon-class {
+ display: block;
+ text-align: center;
+ word-wrap: break-word
+}
+
+.bs-glyphicons li:hover {
+ color: #fff;
+ background-color: #1ABB9C
+}
+
+@media (min-width: 768px) {
+ .bs-glyphicons {
+ margin-right: 0;
+ margin-left: 0
+ }
+ .bs-glyphicons li {
+ width: 12.5%;
+ font-size: 12px
+ }
+}
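+/* Assumed markup for the glyph grid above (it mirrors the Bootstrap docs'
+   glyphicon listing; the structure is illustrative, not prescribed here):
+
+   <div class="bs-glyphicons">
+     <ul class="bs-glyphicons-list">
+       <li>
+         <span class="glyphicon glyphicon-ok"></span>
+         <span class="glyphicon-class">glyphicon-ok</span>
+       </li>
+     </ul>
+   </div>
+*/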
+/* ********* /glyphicons **************************** */
+/* ********* form tags input **************************** */
+
+.tagsinput {
+ border: 1px solid #CCC;
+ background: #FFF;
+ padding: 6px 6px 0;
+ width: 300px;
+ overflow-y: auto;
+}
+
+span.tag {
+ -moz-border-radius: 2px;
+ -webkit-border-radius: 2px;
+ display: block;
+ float: left;
+ padding: 5px 9px;
+ text-decoration: none;
+ background: #1ABB9C;
+ color: #F1F6F7;
+ margin-right: 5px;
+ font-weight: 500;
+ margin-bottom: 5px;
+ font-family: helvetica;
+}
+
+span.tag a {
+ color: #F1F6F7 !important;
+}
+
+.tagsinput span.tag a {
+ font-weight: bold;
+ color: #82ad2b;
+ text-decoration: none;
+ font-size: 11px;
+}
+
+.tagsinput input {
+ width: 80px;
+ margin: 0px;
+ font-family: helvetica;
+ font-size: 13px;
+ border: 1px solid transparent;
+ padding: 3px;
+ background: transparent;
+ color: #000;
+ outline: 0px;
+}
+
+.tagsinput div {
+ display: block;
+ float: left;
+}
+
+.tags_clear {
+ clear: both;
+ width: 100%;
+ height: 0px;
+}
+
+.not_valid {
+ background: #FBD8DB !important;
+ color: #90111A !important;
+}
+/* ********* /form tags input **************************** */
+/* ********* tabs **************************** */
+
+ul.bar_tabs {
+ overflow: visible;
+ background: #F5F7FA;
+ height: 25px;
+ margin: 21px 0 14px;
+ padding-left: 14px;
+ position: relative;
+ z-index: 1;
+ width: 100%;
+ border-bottom: 1px solid #E6E9ED;
+}
+
+ul.bar_tabs > li {
+ border: 1px solid #E6E9ED;
+ color: #333 !important;
+ margin-top: -17px;
+ margin-left: 8px;
+ background: #fff;
+ border-bottom: none;
+ border-radius: 4px 4px 0 0;
+}
+
+ul.bar_tabs > li.active {
+ border-right: 6px solid #D3D6DA;
+ border-top: 0;
+ margin-top: -15px;
+}
+
+ul.bar_tabs > li.active a {
+ background: #fff;
+ border-color: transparent;
+}
+
+ul.bar_tabs > li a {
+ padding: 10px 17px;
+ background: #F5F7FA;
+ margin: 0;
+ border-radius: 0;
+}
+
+ul.bar_tabs.right {
+ padding-right: 14px;
+}
+
+ul.bar_tabs.right li {
+ float: right
+}
+
+a:focus {
+ outline: none;
+}
+/* ********* /tabs **************************** */
+/* ********* timeline **************************** */
+
+ul.timeline li {
+ position: relative;
+ border-bottom: 1px solid #e8e8e8;
+ clear: both;
+}
+
+.timeline .block {
+ margin: 0;
+ border-left: 3px solid #e8e8e8;
+ overflow: visible;
+ padding: 10px 15px;
+ margin-left: 105px;
+}
+
+.timeline.widget {
+ min-width: 0;
+ max-width: inherit;
+}
+
+.timeline.widget .block {
+ margin-left: 5px;
+}
+
+.timeline .tags {
+ position: absolute;
+ top: 15px;
+ left: 0;
+ width: 84px;
+}
+
+.timeline .tag {
+ display: block;
+ height: 30px;
+ font-size: 13px;
+ padding: 8px;
+}
+
+.timeline .tag span {
+ display: block;
+ overflow: hidden;
+ width: 100%;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+}
+
+.tag {
+ line-height: 1;
+ background: #1ABB9C;
+ color: #fff !important;
+}
+
+.tag:after {
+ content: " ";
+ height: 30px;
+ width: 0;
+ position: absolute;
+ left: 100%;
+ top: 0;
+ margin: 0;
+ pointer-events: none;
+ border-top: 14px solid transparent;
+ border-bottom: 14px solid transparent;
+ border-left: 11px solid #1ABB9C;
+}
+
+.timeline h2.title {
+ position: relative;
+ font-size: 16px;
+ margin: 0;
+}
+
+.timeline h2.title:before {
+ content: "";
+ position: absolute;
+ left: -23px;
+ top: 3px;
+ display: block;
+ width: 14px;
+ height: 14px;
+ border: 3px solid #d2d3d2;
+ border-radius: 14px;
+ background: #f9f9f9;
+}
+
+.timeline .byline {
+ padding: .25em 0;
+}
+
+.byline {
+ -webkit-font-smoothing: antialiased;
+ font-style: italic;
+ font-size: .9375em;
+ line-height: 1.3;
+ color: #aab6aa;
+}
+
+ul.social li {
+ border: 0;
+}
+/* ********* /timeline **************************** */
+/* ********* easypie **************************** */
+/* ********* /easypie **************************** */
+/* ********* form wizard **************************** */
+
+.form_wizard .stepContainer {
+ display: block;
+ position: relative;
+ margin: 0;
+ padding: 0;
+ border: 0 solid #CCC;
+ overflow-x: hidden;
+}
+/**-------**/
+
+.wizard_horizontal ul.wizard_steps {
+ display: table;
+ list-style: none;
+ position: relative;
+ width: 100%;
+ margin: 0 0 20px;
+}
+
+.wizard_horizontal ul.wizard_steps li {
+ display: table-cell;
+ text-align: center;
+}
+
+.wizard_horizontal ul.wizard_steps li a,
+.wizard_horizontal ul.wizard_steps li:hover {
+ display: block;
+ position: relative;
+ -moz-opacity: 1;
+ filter: alpha(opacity=100);
+ opacity: 1;
+ color: #666;
+}
+
+.wizard_horizontal ul.wizard_steps li a:before {
+ content: "";
+ position: absolute;
+ height: 4px;
+ background: #ccc;
+ top: 20px;
+ width: 100%;
+ z-index: 4;
+ left: 0;
+}
+
+.wizard_horizontal ul.wizard_steps li a.disabled .step_no {
+ background: #ccc;
+}
+
+.wizard_horizontal ul.wizard_steps li a .step_no {
+ width: 40px;
+ height: 40px;
+ line-height: 40px;
+ border-radius: 100px;
+ display: block;
+ margin: 0 auto 5px;
+ font-size: 16px;
+ text-align: center;
+ position: relative;
+ z-index: 5;
+}
+
+.wizard_horizontal ul.wizard_steps li a.selected:before,
+.step_no {
+ background: #34495E;
+ color: #fff;
+}
+
+.wizard_horizontal ul.wizard_steps li a.done:before,
+.wizard_horizontal ul.wizard_steps li a.done .step_no {
+ background: #1ABB9C;
+ color: #fff;
+}
+
+.wizard_horizontal ul.wizard_steps li:first-child a:before {
+ left: 50%;
+}
+
+.wizard_horizontal ul.wizard_steps li:last-child a:before {
+ right: 50%;
+ width: 50%;
+ left: auto;
+}
+/**-------**/
+
+.wizard_verticle .stepContainer {
+ width: 80%;
+ float: left;
+ padding: 0 10px;
+}
+
+.form_wizard .stepContainer div.content {
+ display: block;
+ position: absolute;
+ float: left;
+ margin: 0;
+ padding: 5px;
+ font: normal 12px Verdana, Arial, Helvetica, sans-serif;
+ color: #5A5655;
+ height: 300px !important;
+ text-align: left;
+ overflow: auto;
+ z-index: 88;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ clear: both;
+}
+
+.actionBar {
+ width: 100%;
+ border-top: 1px solid #ddd;
+ padding: 10px 5px;
+ text-align: right;
+ margin-top: 10px;
+}
+
+.actionBar .buttonDisabled {
+ cursor: not-allowed;
+ pointer-events: none;
+ opacity: .65;
+ filter: alpha(opacity=65);
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+
+.actionBar a {
+ margin: 0 3px;
+}
+/**-------**/
+
+.wizard_verticle .wizard_content {
+ width: 80%;
+ float: left;
+ padding-left: 20px;
+}
+
+.wizard_verticle ul.wizard_steps {
+ display: table;
+ list-style: none;
+ position: relative;
+ width: 20%;
+ float: left;
+ margin: 0 0 20px;
+}
+
+.wizard_verticle ul.wizard_steps li {
+ display: list-item;
+ text-align: center;
+}
+
+.wizard_verticle ul.wizard_steps li a {
+ height: 80px;
+}
+
+.wizard_verticle ul.wizard_steps li a:first-child {
+ margin-top: 20px;
+}
+
+.wizard_verticle ul.wizard_steps li a,
+.wizard_verticle ul.wizard_steps li:hover {
+ display: block;
+ position: relative;
+ -moz-opacity: 1;
+ filter: alpha(opacity=100);
+ opacity: 1;
+ color: #666;
+}
+
+.wizard_verticle ul.wizard_steps li a:before {
+ content: "";
+ position: absolute;
+ height: 100%;
+ background: #ccc;
+ top: 20px;
+ width: 4px;
+ z-index: 4;
+ left: 49%;
+}
+
+.wizard_verticle ul.wizard_steps li a.disabled .step_no {
+ background: #ccc;
+}
+
+.wizard_verticle ul.wizard_steps li a .step_no {
+ width: 40px;
+ height: 40px;
+ line-height: 40px;
+ border-radius: 100px;
+ display: block;
+ margin: 0 auto 5px;
+ font-size: 16px;
+ text-align: center;
+ position: relative;
+ z-index: 5;
+}
+
+.wizard_verticle ul.wizard_steps li a.selected:before,
+.step_no {
+ background: #34495E;
+ color: #fff;
+}
+
+.wizard_verticle ul.wizard_steps li a.done:before,
+.wizard_verticle ul.wizard_steps li a.done .step_no {
+ background: #1ABB9C;
+ color: #fff;
+}
+
+.wizard_verticle ul.wizard_steps li:first-child a:before {
+ left: 49%;
+}
+
+.wizard_verticle ul.wizard_steps li:last-child a:before {
+ left: auto;
+ width: 0;
+}
+/**-------**/
+/* ********* /form wizard **************************** */
+/* ********* notifications **************************** */
+/* Pnotify by Hunter Perrin :: 2.0.1 */
+
+.ui-pnotify {
+ top: 25px;
+ right: 25px;
+ position: absolute;
+ height: auto;
+ /* Ensures notices are above everything */
+
+ z-index: 9999;
+}
+/* Hides position: fixed from IE6, which does not parse child selectors */
+
+html > body > .ui-pnotify {
+ position: fixed;
+}
+
+.ui-pnotify .ui-pnotify-shadow {
+ -webkit-box-shadow: 0px 2px 10px rgba(50, 50, 50, 0.5);
+ -moz-box-shadow: 0px 2px 10px rgba(50, 50, 50, 0.5);
+ box-shadow: 0px 2px 10px rgba(50, 50, 50, 0.5);
+}
+
+.ui-pnotify-container {
+ background-position: 0 0;
+ padding: .8em;
+ height: 100%;
+ margin: 0;
+}
+
+.ui-pnotify-sharp {
+ -webkit-border-radius: 0;
+ -moz-border-radius: 0;
+ border-radius: 0;
+}
+
+.ui-pnotify-title {
+ display: block;
+ margin-bottom: .4em;
+ margin-top: 0;
+}
+
+.ui-pnotify-text {
+ display: block;
+}
+
+.ui-pnotify-icon,
+.ui-pnotify-icon span {
+ display: block;
+ float: left;
+ margin-right: .2em;
+}
+/* Alternate stack initial positioning. */
+
+.ui-pnotify.stack-topleft,
+.ui-pnotify.stack-bottomleft {
+ left: 25px;
+ right: auto;
+}
+
+.ui-pnotify.stack-bottomright,
+.ui-pnotify.stack-bottomleft {
+ bottom: 25px;
+ top: auto;
+}
+
+.ui-pnotify-closer,
+.ui-pnotify-sticker {
+ float: right;
+ margin-left: .2em;
+}
+/* theming */
+
+.alert-success {
+ color: #ffffff;
+ background-color: rgba(38, 185, 154, 0.88);
+ border-color: rgba(38, 185, 154, 0.88);
+}
+
+.alert-info {
+ color: #E9EDEF;
+ background-color: rgba(52, 152, 219, 0.88);
+ border-color: rgba(52, 152, 219, 0.88);
+}
+
+.alert-warning {
+ color: #E9EDEF;
+ background-color: rgba(243, 156, 18, 0.88);
+ border-color: rgba(243, 156, 18, 0.88);
+}
+
+.alert-danger,
+.alert-error {
+ color: #E9EDEF;
+ background-color: rgba(231, 76, 60, 0.88);
+ border-color: rgba(231, 76, 60, 0.88);
+}
+
+.alert-dark,
+.btn-dark {
+ color: #E9EDEF;
+ background-color: rgba(52, 73, 94, 0.88);
+ border-color: rgba(52, 73, 94, 0.88);
+}
+
+.btn-dark:hover {
+ color: #F7F7F7;
+}
+/* /theming */
+/* /Pnotify by Hunter Perrin :: 2.0.1 */
+
+.btn.btn-outline {
+ background: transparent;
+}
+
+.btn-primary.btn-outline {
+ color: #1A82C3;
+ border-width: 2px;
+}
+
+.btn-default.btn-outline {
+ color: #34495e;
+ border-width: 2px;
+}
+
+.btn-success.btn-outline {
+ color: #1ABB9C;
+ border-width: 2px;
+}
+
+.btn-info.btn-outline {
+ color: #34495e;
+ border-width: 2px;
+}
+
+.btn-warning.btn-outline {
+ color: #5bc0de;
+ border-width: 2px;
+}
+
+.btn-danger.btn-outline {
+ color: #34495e;
+ border-width: 2px;
+}
+
+.btn-dark.btn-outline {
+ color: #c0392b;
+ border-width: 2px;
+}
+
+.btn-warning {
+ background: #f0ad4e;
+ border-color: #f0ad4e;
+}
+
+.btn-danger {
+ background: #d9534f;
+ border-color: #d9534f;
+}
+
+.btn-success:hover,
+.btn-success:focus,
+.btn-success:active,
+.btn-success.active,
+.open .dropdown-toggle.btn-success {
+ background-color: #1ABB9C;
+ border-color: #1ABB9C;
+ color: #FFFFFF;
+}
+
+.btn-info:hover,
+.btn-info:focus,
+.btn-info:active,
+.btn-info.active,
+.open .dropdown-toggle.btn-info {
+ background-color: #4FB5D3;
+ border-color: #4FB5D3;
+ color: #FFFFFF;
+}
+
+.btn-warning:hover,
+.btn-warning:focus,
+.btn-warning:active,
+.btn-warning.active,
+.open .dropdown-toggle.btn-warning {
+ background-color: #d58512;
+ border-color: #d58512;
+ color: #FFFFFF;
+}
+
+.btn-primary:hover,
+.btn-primary:focus,
+.btn-primary:active,
+.btn-primary.active,
+.open .dropdown-toggle.btn-primary {
+ background-color: #1479B8;
+ border-color: #1479B8;
+ color: #FFFFFF;
+}
+
+.btn-danger:hover,
+.btn-danger:focus,
+.btn-danger:active,
+.btn-danger.active,
+.open .dropdown-toggle.btn-danger {
+ background-color: #d43f3a;
+ border-color: #d43f3a;
+ color: #FFFFFF;
+}
+
+.btn-dark:hover,
+.btn-dark:focus,
+.btn-dark:active,
+.btn-dark.active,
+.open .dropdown-toggle.btn-dark {
+ background-color: #394D5F;
+ border-color: #394D5F;
+ color: #FFFFFF;
+}
+
+.custom-notifications {
+ position: fixed;
+ margin: 15px;
+ right: 0;
+ float: right;
+ width: 400px;
+ z-index: 4000;
+ bottom: 0;
+}
+
+.btn-round {
+ border-radius: 30px;
+}
+/* ********* /notifications **************************** */
+/* ********* profile/social **************************** */
+
+.social-sidebar,
+.social-body {
+ float: right;
+}
+
+.social-sidebar {
+ background: #EDEDED;
+ width: 22%;
+}
+
+.social-body {
+ border: 1px solid #ccc;
+ width: 78%;
+}
+
+.thumb img {
+ width: 50px;
+ height: 50px;
+ border-radius: 50%;
+}
+
+.chat .thumb img {
+ width: 27px;
+ height: 27px;
+ border-radius: 50%;
+}
+
+.chat .status {
+ float: left;
+ margin: 16px 0 0 -16px;
+ font-size: 14px;
+ font-weight: bold;
+ width: 12px;
+ height: 12px;
+ display: block;
+ border: 2px solid #FFF;
+ z-index: 12312;
+ border-radius: 50%;
+}
+
+.chat .status.online {
+ background: #1ABB9C;
+}
+
+.chat .status.away {
+ background: #F39C12;
+}
+
+.chat .status.offline {
+ background: #ccc;
+}
+
+.chat .media-body {
+ padding-top: 5px;
+}
+/* ********* /profile/social **************************** */
+/* ********* widgets **************************** */
+
+.dashboard_graph .x_title {
+ padding: 5px 5px 7px;
+}
+
+.dashboard_graph .x_title h3 {
+ margin: 0;
+ font-weight: normal;
+}
+
+.chart {
+ position: relative;
+ display: inline-block;
+ width: 100px;
+ height: 100px;
+ margin-top: 5px;
+ margin-bottom: 5px;
+ text-align: center;
+}
+
+.chart canvas {
+ position: absolute;
+ top: 0;
+ left: 0;
+}
+
+.percent {
+ display: inline-block;
+ line-height: 96px;
+ z-index: 2;
+ font-size: 18px;
+}
+
+.percent:after {
+ content: '%';
+ margin-left: 0.1em;
+ font-size: .8em;
+}
+
+.angular {
+ margin-top: 100px;
+}
+
+.angular .chart {
+ margin-top: 0;
+}
+
+.widget {
+ min-width: 250px;
+ max-width: 310px;
+}
+
+.widget_tally_box .btn-group button {
+ text-align: center
+}
+
+.widget_tally_box .btn-group button {
+ color: inherit;
+ font-weight: 500;
+ background-color: #f5f5f5;
+ border: 1px solid #e7e7e7;
+}
+
+ul.widget_tally,
+ul.widget_tally li {
+ width: 100%;
+}
+
+ul.widget_tally li {
+ padding: 2px 10px;
+ border-bottom: 1px solid #ECECEC;
+ padding-bottom: 4px;
+}
+
+ul.widget_tally .month {
+ width: 70%;
+ float: left;
+}
+
+ul.widget_tally .count {
+ width: 30%;
+ float: left;
+ text-align: right
+}
+
+.pie_bg {
+ border-bottom: 1px solid rgba(101, 204, 182, 0.16);
+ padding-bottom: 10px;
+ -webkit-border-radius: 4px;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe6e6e6', GradientType=0);
+ filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
+ -webkit-box-shadow: 0 4px 6px -6px #222;
+ -moz-box-shadow: 0 4px 6px -6px #222;
+ box-shadow: 0 4px 6px -6px #222;
+}
+
+.widget_tally_box .flex {
+ display: flex;
+}
+
+ul.widget_profile_box {
+ width: 100%;
+ height: 42px;
+ padding: 3px;
+ background: #ececec;
+ margin-top: 40px;
+ margin-left: 1px;
+}
+
+ul.widget_profile_box li:first-child {
+ width: 25%;
+ float: left;
+}
+
+ul.widget_profile_box li:first-child a {
+ float: left;
+}
+
+ul.widget_profile_box li:last-child {
+ width: 25%;
+ float: right;
+}
+
+ul.widget_profile_box li:last-child a {
+ float: right;
+}
+
+ul.widget_profile_box li {}
+
+ul.widget_profile_box li a {
+ font-size: 22px;
+ text-align: center;
+ width: 35px;
+ height: 35px;
+ border: 1px solid rgba(52, 73, 94, 0.44);
+ display: block;
+ border-radius: 50%;
+ padding: 0px;
+}
+
+ul.widget_profile_box li a:hover {
+ color: #1ABB9C !important;
+ border: 1px solid rgba(38, 185, 154, 1);
+}
+
+ul.widget_profile_box li .profile_img {
+ width: 85px;
+ height: 85px;
+ margin: 0;
+ margin-top: -28px;
+}
+
+.widget_tally_box p,
+.widget_tally_box span {
+ text-align: center;
+}
+
+.widget_tally_box .name {
+ text-align: center;
+ margin: 25px;
+}
+
+.widget_tally_box .name_title {
+ text-align: center;
+ margin: 5px;
+}
+
+.widget_tally_box ul.legend {
+ margin: 0;
+}
+
+.widget_tally_box ul.legend p,
+.widget_tally_box ul.legend span {
+ text-align: left;
+}
+
+.widget_tally_box ul.legend li .icon {
+ font-size: 20px;
+ float: left;
+ width: 14px;
+}
+
+.widget_tally_box ul.legend li .name {
+ font-size: 14px;
+ margin: 5px 0 0 14px;
+ text-overflow: ellipsis;
+ float: left;
+}
+
+.widget_tally_box ul.legend p {
+ display: inline-block;
+ margin: 0;
+}
+
+.widget_tally_box ul.verticle_bars li {
+ height: 140px;
+ width: 23%;
+}
+
+.widget .verticle_bars li .progress.vertical.progress_wide {
+ width: 65%;
+}
+
+ul.count2 {
+ width: 100%;
+ margin-left: 1px;
+ border: 1px solid #ddd;
+ border-left: 0;
+ border-right: 0;
+ padding: 10px 0;
+}
+
+ul.count2 li {
+ width: 30%;
+ text-align: center;
+}
+
+ul.count2 li h3 {
+ font-weight: 400;
+ margin: 0;
+}
+
+ul.count2 li span {
+ font-weight: 300;
+}
+
+.divider {
+ border-bottom: 1px solid #ddd;
+ margin: 10px;
+}
+
+.divider-dashed {
+ border-top: 1px dashed #e7eaec;
+ background-color: #ffffff;
+ height: 1px;
+ margin: 10px 0;
+}
+
+ul.messages {
+ padding: 0;
+}
+
+ul.messages li,
+.tasks li {
+ border-bottom: 1px dotted #e6e6e6;
+ padding: 8px 0;
+}
+
+ul.messages li img.avatar,
+img.avatar {
+ height: 32px;
+ width: 32px;
+ float: left;
+ display: inline-block;
+ -webkit-border-radius: 2px;
+ -moz-border-radius: 2px;
+ border-radius: 2px;
+ padding: 2px;
+ background: #f7f7f7;
+ border: 1px solid #e6e6e6;
+}
+
+ul.messages li .message_date {
+ float: right;
+ text-align: right;
+}
+
+ul.messages li .message_wrapper {
+ margin-left: 50px;
+ margin-right: 40px;
+}
+
+ul.messages li .message_wrapper h4.heading {
+ font-weight: 600;
+ margin: 0;
+ cursor: pointer;
+ margin-bottom: 10px;
+ line-height: 100%;
+}
+
+ul.messages li .message_wrapper blockquote {
+ padding: 0px 10px;
+ margin: 0;
+ border-left: 5px solid #eee;
+}
+
+ul.user_data li {
+ margin-bottom: 6px;
+}
+
+ul.user_data li p {
+ margin-bottom: 0;
+}
+
+ul.user_data li .progress {
+ width: 90%;
+}
+
+.project_progress .progress {
+ margin-bottom: 3px !important;
+ margin-top: 5px;
+}
+
+.projects .list-inline {
+ margin: 0;
+}
+
+.profile_title {
+ background: #F5F7FA;
+ border: 0;
+ padding: 7px 0;
+ display: flex;
+}
+
+ul.stats-overview {
+ border-bottom: 1px solid #e8e8e8;
+ padding-bottom: 10px;
+ margin-bottom: 10px;
+}
+
+ul.stats-overview li {
+ display: inline-block;
+ text-align: center;
+ padding: 0 15px;
+ width: 30%;
+ font-size: 14px;
+ border-right: 1px solid #e8e8e8;
+}
+
+ul.stats-overview li:last-child {
+ border-right: 0;
+}
+
+ul.stats-overview li .name {
+ font-size: 12px;
+}
+
+ul.stats-overview li .value {
+ font-size: 14px;
+ font-weight: bold;
+ display: block;
+}
+
+ul.stats-overview li:first-child {
+ padding-left: 0;
+}
+
+ul.project_files li {
+ margin-bottom: 5px;
+}
+
+ul.project_files li a i {
+ width: 20px;
+}
+
+.project_detail p {
+ margin-bottom: 10px;
+}
+
+.project_detail p.title {
+ font-weight: bold;
+ margin-bottom: 0
+}
+
+.avatar img {
+ border-radius: 50%;
+ max-width: 45px;
+}
+/* ********* /widgets **************************** */
+/* ********* pricing **************************** */
+
+.pricing {
+ background: #fff;
+}
+
+.pricing .title {
+ background: #1ABB9C;
+ height: 110px;
+ color: #fff;
+ padding: 15px 0 0;
+ text-align: center;
+}
+
+.pricing .title h2 {
+ text-transform: capitalize;
+ font-size: 18px;
+ border-radius: 5px 5px 0 0;
+ margin: 0;
+ font-weight: 400;
+}
+
+.pricing .title h1 {
+ font-size: 30px;
+ margin: 12px;
+}
+
+.pricing .title span {
+ background: rgba(51, 51, 51, 0.28);
+ padding: 2px 5px;
+}
+
+.pricing_features {
+ background: #FAFAFA;
+ padding: 20px 15px;
+ min-height: 230px;
+ font-size: 13.5px;
+}
+
+.pricing_features ul li {
+ margin-top: 10px;
+}
+
+.pricing_footer {
+ padding: 10px 15px;
+ background-color: #f5f5f5;
+ border-top: 1px solid #ddd;
+ text-align: center;
+ border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px;
+}
+
+.pricing_footer p {
+ font-size: 13px;
+ padding: 10px 0 2px;
+ display: block;
+}
+
+.ui-ribbon-container {
+ position: relative;
+}
+
+.ui-ribbon-container .ui-ribbon-wrapper {
+ position: absolute;
+ overflow: hidden;
+ width: 85px;
+ height: 88px;
+ top: -3px;
+ right: -3px;
+}
+
+.ui-ribbon-container.ui-ribbon-primary .ui-ribbon {
+ background-color: #5b90bf;
+}
+
+.ui-ribbon-container .ui-ribbon {
+ position: relative;
+ display: block;
+ text-align: center;
+ font-size: 15px;
+ font-weight: 700;
+ color: #fff;
+ -webkit-transform: rotate(45deg);
+ -moz-transform: rotate(45deg);
+ -ms-transform: rotate(45deg);
+ -o-transform: rotate(45deg);
+ transform: rotate(45deg);
+ padding: 7px 0;
+ left: -5px;
+ top: 15px;
+ width: 120px;
+ line-height: 20px;
+ background-color: #555;
+ box-shadow: 0 0 3px rgba(0, 0, 0, .3);
+}
+
+.ui-ribbon-container.ui-ribbon-primary .ui-ribbon:after,
+.ui-ribbon-container.ui-ribbon-primary .ui-ribbon:before {
+ border-top: 2px solid #5b90bf;
+}
+
+.ui-ribbon-container .ui-ribbon:before {
+ left: 0;
+ bottom: -1px;
+}
+
+.ui-ribbon-container .ui-ribbon:after {
+ right: 0;
+}
+
+.ui-ribbon-container .ui-ribbon:after,
+.ui-ribbon-container .ui-ribbon:before {
+ position: absolute;
+ content: " ";
+ line-height: 0;
+ border-top: 2px solid #555;
+ border-left: 2px solid transparent;
+ border-right: 2px solid transparent;
+}
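+/* The ribbon is a 45-degree-rotated strip clipped by its wrapper; a sketch
+   of the nesting these selectors imply (assumed, for illustration only):
+
+   <div class="pricing ui-ribbon-container ui-ribbon-primary">
+     <div class="ui-ribbon-wrapper">
+       <div class="ui-ribbon">Popular</div>
+     </div>
+     <div class="title">...</div>
+   </div>
+*/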
+/* ********* /pricing **************************** */
+/* ********* media gallery **************************** */
+
+.thumbnail .image {
+ height: 120px;
+ overflow: hidden;
+}
+
+.caption {
+ padding: 9px 5px;
+ background: #F7F7F7;
+}
+
+.caption p {
+ margin-bottom: 5px;
+}
+
+.thumbnail {
+ height: 190px;
+ overflow: hidden;
+}
+
+.view {
+ overflow: hidden;
+ position: relative;
+ text-align: center;
+ box-shadow: 1px 1px 2px #e6e6e6;
+ cursor: default;
+}
+
+.view .mask,
+.view .content {
+ position: absolute;
+ width: 100%;
+ overflow: hidden;
+ top: 0;
+ left: 0
+}
+
+.view img {
+ display: block;
+ position: relative
+}
+
+.view .tools {
+ text-transform: uppercase;
+ color: #fff;
+ text-align: center;
+ position: relative;
+ font-size: 17px;
+ padding: 3px;
+ background: rgba(0, 0, 0, 0.35);
+ margin: 43px 0 0 0;
+}
+
+.mask.no-caption .tools {
+ margin: 90px 0 0 0;
+}
+
+.view .tools a {
+ display: inline-block;
+ color: #FFF;
+ font-size: 18px;
+ font-weight: 400;
+ padding: 0 4px;
+}
+
+.view p {
+ font-family: Georgia, serif;
+ font-style: italic;
+ font-size: 12px;
+ position: relative;
+ color: #fff;
+ padding: 10px 20px 20px;
+ text-align: center
+}
+
+.view a.info {
+ display: inline-block;
+ text-decoration: none;
+ padding: 7px 14px;
+ background: #000;
+ color: #fff;
+ text-transform: uppercase;
+ box-shadow: 0 0 1px #000
+}
+
+.view-first img {
+ transition: all 0.2s linear;
+}
+
+.view-first .mask {
+ opacity: 0;
+ background-color: rgba(0, 0, 0, 0.5);
+ transition: all 0.4s ease-in-out;
+}
+
+.view-first .tools {
+ transform: translateY(-100px);
+ opacity: 0;
+ transition: all 0.2s ease-in-out;
+}
+
+.view-first p {
+ transform: translateY(100px);
+ opacity: 0;
+ transition: all 0.2s linear;
+}
+
+.view-first:hover img {
+ transform: scale(1.1);
+}
+
+.view-first:hover .mask {
+ opacity: 1;
+}
+
+.view-first:hover .tools,
+.view-first:hover p {
+ opacity: 1;
+ transform: translateY(0px);
+}
+
+.view-first:hover p {
+ transition-delay: 0.1s;
+}
+/* ********* /media gallery **************************** */
+/* ********* vertical tabs **************************** */
+/*!
+ * bootstrap-vertical-tabs - v1.2.1
+ * https://dbtek.github.io/bootstrap-vertical-tabs
+ * 2014-11-07
+ * Copyright (c) 2014 İsmail Demirbilek
+ * License: MIT
+ */
+
+.tabs-left,
+.tabs-right {
+ border-bottom: none;
+ padding-top: 2px;
+}
+
+.tabs-left {
+ border-right: 1px solid #F7F7F7;
+}
+
+.tabs-right {
+ border-left: 1px solid #F7F7F7;
+}
+
+.tabs-left>li,
+.tabs-right>li {
+ float: none;
+ margin-bottom: 2px;
+}
+
+.tabs-left>li {
+ margin-right: -1px;
+}
+
+.tabs-right>li {
+ margin-left: -1px;
+}
+
+.tabs-left>li.active>a,
+.tabs-left>li.active>a:hover,
+.tabs-left>li.active>a:focus {
+ border-bottom-color: #F7F7F7;
+ border-right-color: transparent;
+}
+
+.tabs-right>li.active>a,
+.tabs-right>li.active>a:hover,
+.tabs-right>li.active>a:focus {
+ border-bottom: 1px solid #F7F7F7;
+ border-left-color: transparent;
+}
+
+.tabs-left>li>a {
+ border-radius: 4px 0 0 4px;
+ margin-right: 0;
+ display: block;
+ background: #F7F7F7;
+ text-overflow: ellipsis;
+ overflow: hidden;
+}
+
+.tabs-right>li>a {
+ border-radius: 0 4px 4px 0;
+ margin-right: 0;
+ background: #F7F7F7;
+ text-overflow: ellipsis;
+ overflow: hidden;
+}
+
+.sideways {
+ margin-top: 50px;
+ border: none;
+ position: relative;
+}
+
+.sideways>li {
+ height: 20px;
+ width: 120px;
+ margin-bottom: 100px;
+}
+
+.sideways>li>a {
+ border-bottom: 1px solid #ddd;
+ border-right-color: transparent;
+ text-align: center;
+ border-radius: 4px 4px 0px 0px;
+}
+
+.sideways>li.active>a,
+.sideways>li.active>a:hover,
+.sideways>li.active>a:focus {
+ border-bottom-color: transparent;
+ border-right-color: #ddd;
+ border-left-color: #ddd;
+}
+
+.sideways.tabs-left {
+ left: -50px;
+}
+
+.sideways.tabs-right {
+ right: -50px;
+}
+
+.sideways.tabs-right>li {
+ -webkit-transform: rotate(90deg);
+ -moz-transform: rotate(90deg);
+ -ms-transform: rotate(90deg);
+ -o-transform: rotate(90deg);
+ transform: rotate(90deg);
+}
+
+.sideways.tabs-left>li {
+ -webkit-transform: rotate(-90deg);
+ -moz-transform: rotate(-90deg);
+ -ms-transform: rotate(-90deg);
+ -o-transform: rotate(-90deg);
+ transform: rotate(-90deg);
+}
+/* ********* /vertical tabs **************************** */
+/* ********* image cropping **************************** */
+/*!
+ * Cropper v0.8.0
+ * https://github.com/fengyuanchen/cropper
+ *
+ * Copyright 2014-2015 Fengyuan Chen
+ * Released under the MIT license
+ *
+ * Date: 2015-02-19T06:49:29.144Z
+ */
+
+.cropper-container {
+ position: relative;
+ overflow: hidden;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ -webkit-tap-highlight-color: transparent;
+ -webkit-touch-callout: none;
+}
+
+.cropper-container img {
+ display: block;
+ width: 100%;
+ min-width: 0 !important;
+ max-width: none !important;
+ height: 100%;
+ min-height: 0 !important;
+ max-height: none !important;
+}
+
+.cropper-modal,
+.cropper-canvas {
+ position: absolute;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+}
+
+.cropper-canvas {
+ background-color: #fff;
+ filter: alpha(opacity=0);
+ opacity: 0;
+}
+
+.cropper-modal {
+ background-color: #000;
+ filter: alpha(opacity=50);
+ opacity: .5;
+}
+
+.cropper-cropbox {
+ position: absolute;
+ top: 10%;
+ left: 10%;
+ width: 80%;
+ height: 80%;
+}
+
+.cropper-viewer {
+ display: block;
+ width: 100%;
+ height: 100%;
+ overflow: hidden;
+ outline: 1px solid #69f;
+ outline-color: rgba(102, 153, 255, .75);
+}
+
+.cropper-dashed {
+ position: absolute;
+ display: block;
+ filter: alpha(opacity=50);
+ border: 0 dashed #fff;
+ opacity: .5;
+}
+
+.cropper-dashed.dashed-h {
+ top: 33.33333333%;
+ left: 0;
+ width: 100%;
+ height: 33.33333333%;
+ border-top-width: 1px;
+ border-bottom-width: 1px;
+}
+
+.cropper-dashed.dashed-v {
+ top: 0;
+ left: 33.33333333%;
+ width: 33.33333333%;
+ height: 100%;
+ border-right-width: 1px;
+ border-left-width: 1px;
+}
+
+.cropper-face,
+.cropper-line,
+.cropper-point {
+ position: absolute;
+ display: block;
+ width: 100%;
+ height: 100%;
+ filter: alpha(opacity=10);
+ opacity: .1;
+}
+
+.cropper-face {
+ top: 0;
+ left: 0;
+ cursor: move;
+ background-color: #fff;
+}
+
+.cropper-line {
+ background-color: #69f;
+}
+
+.cropper-line.line-e {
+ top: 0;
+ right: -3px;
+ width: 5px;
+ cursor: e-resize;
+}
+
+.cropper-line.line-n {
+ top: -3px;
+ left: 0;
+ height: 5px;
+ cursor: n-resize;
+}
+
+.cropper-line.line-w {
+ top: 0;
+ left: -3px;
+ width: 5px;
+ cursor: w-resize;
+}
+
+.cropper-line.line-s {
+ bottom: -3px;
+ left: 0;
+ height: 5px;
+ cursor: s-resize;
+}
+
+.cropper-point {
+ width: 5px;
+ height: 5px;
+ background-color: #69f;
+ filter: alpha(opacity=75);
+ opacity: .75;
+}
+
+.cropper-point.point-e {
+ top: 50%;
+ right: -3px;
+ margin-top: -3px;
+ cursor: e-resize;
+}
+
+.cropper-point.point-n {
+ top: -3px;
+ left: 50%;
+ margin-left: -3px;
+ cursor: n-resize;
+}
+
+.cropper-point.point-w {
+ top: 50%;
+ left: -3px;
+ margin-top: -3px;
+ cursor: w-resize;
+}
+
+.cropper-point.point-s {
+ bottom: -3px;
+ left: 50%;
+ margin-left: -3px;
+ cursor: s-resize;
+}
+
+.cropper-point.point-ne {
+ top: -3px;
+ right: -3px;
+ cursor: ne-resize;
+}
+
+.cropper-point.point-nw {
+ top: -3px;
+ left: -3px;
+ cursor: nw-resize;
+}
+
+.cropper-point.point-sw {
+ bottom: -3px;
+ left: -3px;
+ cursor: sw-resize;
+}
+
+.cropper-point.point-se {
+ right: -3px;
+ bottom: -3px;
+ width: 20px;
+ height: 20px;
+ cursor: se-resize;
+ filter: alpha(opacity=100);
+ opacity: 1;
+}
+
+.cropper-point.point-se:before {
+ position: absolute;
+ right: -50%;
+ bottom: -50%;
+ display: block;
+ width: 200%;
+ height: 200%;
+ content: " ";
+ background-color: #69f;
+ filter: alpha(opacity=0);
+ opacity: 0;
+}
+
+@media (min-width: 768px) {
+ .cropper-point.point-se {
+ width: 15px;
+ height: 15px;
+ }
+}
+
+@media (min-width: 992px) {
+ .cropper-point.point-se {
+ width: 10px;
+ height: 10px;
+ }
+}
+
+@media (min-width: 1200px) {
+ .cropper-point.point-se {
+ width: 5px;
+ height: 5px;
+ filter: alpha(opacity=75);
+ opacity: .75;
+ }
+}
+
+.cropper-bg {
+ background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAQMAAAAlPW0iAAAAA3NCSVQICAjb4U/gAAAABlBMVEXMzMz////TjRV2AAAACXBIWXMAAArrAAAK6wGCiw1aAAAAHHRFWHRTb2Z0d2FyZQBBZG9iZSBGaXJld29ya3MgQ1M26LyyjAAAABFJREFUCJlj+M/AgBVhF/0PAH6/D/HkDxOGAAAAAElFTkSuQmCC");
+}
+
+.cropper-invisible {
+ filter: alpha(opacity=0);
+ opacity: 0;
+}
+
+.cropper-hide {
+ position: fixed;
+ top: 0;
+ left: 0;
+ z-index: -1;
+ width: auto !important;
+ max-width: none !important;
+ height: auto !important;
+ max-height: none !important;
+ filter: alpha(opacity=0);
+ opacity: 0;
+}
+
+.cropper-hidden {
+ display: none !important;
+}
+
+.cropper-move {
+ cursor: move;
+}
+
+.cropper-crop {
+ cursor: crosshair;
+}
+
+.cropper-disabled .cropper-canvas,
+.cropper-disabled .cropper-face,
+.cropper-disabled .cropper-line,
+.cropper-disabled .cropper-point {
+ cursor: not-allowed;
+}
+
+.avatar-view {
+ display: block;
+ height: 220px;
+ width: 220px;
+ border: 3px solid #fff;
+ border-radius: 5px;
+ box-shadow: 0 0 5px rgba(0, 0, 0, .15);
+ cursor: pointer;
+ overflow: hidden;
+}
+
+.avatar-view img {
+ width: 100%;
+}
+
+.avatar-body {
+ padding-right: 15px;
+ padding-left: 15px;
+}
+
+.avatar-upload {
+ overflow: hidden;
+}
+
+.avatar-upload label {
+ display: block;
+ float: left;
+ clear: left;
+ width: 100px;
+}
+
+.avatar-upload input {
+ display: block;
+ margin-left: 110px;
+}
+
+.avater-alert {
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+
+.avatar-wrapper {
+ height: 364px;
+ width: 100%;
+ margin-top: 15px;
+ box-shadow: inset 0 0 5px rgba(0, 0, 0, .25);
+ background-color: #fcfcfc;
+ overflow: hidden;
+}
+
+.avatar-wrapper img {
+ display: block;
+ height: auto;
+ max-width: 100%;
+}
+
+.avatar-preview {
+ float: left;
+ margin-top: 15px;
+ margin-right: 15px;
+ border: 1px solid #eee;
+ border-radius: 4px;
+ background-color: #fff;
+ overflow: hidden;
+}
+
+.avatar-preview:hover {
+ border-color: #ccf;
+ box-shadow: 0 0 5px rgba(0, 0, 0, .15);
+}
+
+.avatar-preview img {
+ width: 100%;
+}
+
+.preview-lg {
+ height: 184px;
+ width: 184px;
+ margin-top: 15px;
+}
+
+.preview-md {
+ height: 100px;
+ width: 100px;
+}
+
+.preview-sm {
+ height: 50px;
+ width: 50px;
+}
+
+@media (min-width: 992px) {
+ .avatar-preview {
+ float: none;
+ }
+}
+
+.avatar-btns {
+ margin-top: 30px;
+ margin-bottom: 15px;
+}
+
+.avatar-btns .btn-group {
+ margin-right: 5px;
+}
+
+.loading {
+ display: none;
+ position: absolute;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ background: #fff url("../images/loading.gif") no-repeat center center;
+ opacity: .75;
+ filter: alpha(opacity=75);
+ z-index: 20140628;
+}
+/* ********* /image cropping **************************** */
+/* ********* morris **************************** */
+
+.morris-hover {
+ position: absolute;
+ z-index: 1000;
+}
+
+.morris-hover.morris-default-style {
+ padding: 6px;
+ color: #666;
+ background: rgba(243, 242, 243, 0.8);
+ border: solid 2px rgba(195, 194, 196, 0.8);
+ font-family: sans-serif;
+ font-size: 12px;
+ text-align: center;
+}
+
+.morris-hover.morris-default-style .morris-hover-row-label {
+ font-weight: bold;
+ margin: 0.25em 0;
+}
+
+.morris-hover.morris-default-style .morris-hover-point {
+ white-space: nowrap;
+ margin: 0.1em 0;
+}
+/* ********* /morris **************************** */
+/* ********* ecommerce **************************** */
+
+.price {
+ font-size: 40px;
+ font-weight: 400;
+ color: #26B99A;
+ margin: 0;
+}
+
+.prod_title {
+ border-bottom: 1px solid #DFDFDF;
+ padding-bottom: 5px;
+ margin: 30px 0;
+ font-size: 20px;
+ font-weight: 400;
+}
+
+.product-image img {
+ width: 90%;
+}
+
+.prod_color li {
+ margin: 0 10px;
+}
+
+.prod_color li p {
+ margin-bottom: 0;
+}
+
+.prod_size li {
+ padding: 0;
+}
+
+.prod_color .color {
+ width: 25px;
+ height: 25px;
+ border: 2px solid rgba(51, 51, 51, 0.28) !important;
+ padding: 2px;
+ border-radius: 50px;
+}
+
+.product_gallery a {
+ width: 100px;
+ height: 100px;
+ float: left;
+ margin: 10px;
+ border: 1px solid #e5e5e5;
+}
+
+.product_gallery a img {
+ width: 100%;
+ margin-top: 15px;
+}
+
+.product_price {
+ margin: 20px 0;
+ padding: 5px 10px;
+ background-color: #FFFFFF;
+ text-align: left;
+ border: 2px dashed #E0E0E0;
+}
+
+.price-tax {
+ font-size: 18px;
+}
+
+.product_social {
+ margin: 20px 0;
+}
+
+.product_social ul li a i {
+ font-size: 35px;
+}
+/* ********* /ecommerce **************************** */
+/* ********* progressbar **************************** */
+/*! bootstrap-progressbar v0.8.4 | Copyright (c) 2012-2014 Stephan Groß | MIT license | http://www.minddust.com */
+
+.progress_summary .progress {
+ margin: 5px 0 12px !important;
+}
+
+.progress_summary .row {
+ margin-bottom: 5px;
+}
+
+.progress_summary .row .col-xs-2 {
+ padding: 0
+}
+
+.progress_summary .more_info span {
+ text-align: right;
+ float: right;
+}
+
+.progress_summary .data span {
+ text-align: right;
+ float: right;
+}
+
+.progress_summary p {
+ margin-bottom: 3px;
+ width: 100%;
+}
+
+.progress_title .left {
+ float: left;
+ text-align: left;
+}
+
+.progress_title .right {
+ float: right;
+ text-align: right;
+ font-weight: 300;
+}
+
+@-webkit-keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+
+@keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+
+.progress {
+ border-radius: 0;
+ margin-bottom: 18px;
+}
+
+.progress.right .progress-bar {
+ float: right;
+ right: 0;
+}
+
+.progress.vertical {
+ width: 40px;
+}
+
+.progress.progress_sm {
+ border-radius: 0;
+ margin-bottom: 18px;
+ height: 10px !important;
+}
+
+.progress.progress_sm .progress-bar {
+ height: 10px !important;
+}
+
+.dashboard_graph p {
+ margin: 0 0 4px;
+}
+
+ul.verticle_bars {
+ width: 100%;
+}
+
+ul.verticle_bars li {
+ width: 23%;
+ height: 200px;
+ margin: 0;
+}
+
+.progress {
+ overflow: hidden;
+ height: 20px;
+ margin-bottom: 20px;
+ background-color: #f5f5f5;
+ -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
+ box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
+}
+
+.progress-bar {
+ float: left;
+ width: 0%;
+ height: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ color: #ffffff;
+ text-align: center;
+ background-color: #428bca;
+ -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
+ box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
+ -webkit-transition: width 0.6s ease;
+ -o-transition: width 0.6s ease;
+ transition: width 0.6s ease;
+}
+
+.progress-striped .progress-bar,
+.progress-bar-striped {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-size: 40px 40px;
+}
+
+.progress.active .progress-bar,
+.progress-bar.active {
+ -webkit-animation: progress-bar-stripes 2s linear infinite;
+ -o-animation: progress-bar-stripes 2s linear infinite;
+ animation: progress-bar-stripes 2s linear infinite;
+}
+
+.progress-bar-success {
+ background-color: #26B99A;
+}
+
+.progress-striped .progress-bar-success {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+
+.progress-bar-info {
+ background-color: #3498DB;
+}
+
+.progress-striped .progress-bar-info {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+
+.progress-bar-warning {
+ background-color: #F39C12;
+}
+
+.progress-striped .progress-bar-warning {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+
+.progress-bar-danger {
+ background-color: #d9534f;
+}
+
+.progress-striped .progress-bar-danger {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+
+.progress {
+ position: relative;
+}
+
+.progress .progress-bar {
+ position: absolute;
+ overflow: hidden;
+ line-height: 20px;
+}
+
+.progress .progressbar-back-text {
+ position: absolute;
+ width: 100%;
+ height: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ text-align: center;
+}
+
+.progress .progressbar-front-text {
+ display: block;
+ width: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ text-align: center;
+}
+
+.progress.right .progress-bar {
+ right: 0;
+}
+
+.progress.right .progressbar-front-text {
+ position: absolute;
+ right: 0;
+}
+
+.progress.vertical {
+ width: 20px;
+ height: 100%;
+ float: left;
+ margin-right: 10px;
+}
+
+.progress.vertical.progress_wide {
+ width: 35px;
+}
+
+.progress.vertical.bottom {
+ position: relative;
+}
+
+.progress.vertical.bottom .progressbar-front-text {
+ position: absolute;
+ bottom: 0;
+}
+
+.progress.vertical .progress-bar {
+ width: 100%;
+ height: 0;
+ -webkit-transition: height 0.6s ease;
+ -o-transition: height 0.6s ease;
+ transition: height 0.6s ease;
+}
+
+.progress.vertical.bottom .progress-bar {
+ position: absolute;
+ bottom: 0;
+}
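+/* Usage sketch (standard bootstrap-progressbar markup; bar sizes are set
+   inline here for illustration, normally by the plugin). A striped animated
+   horizontal bar and a vertical bar:
+
+   <div class="progress progress-striped active">
+     <div class="progress-bar progress-bar-success" style="width: 60%"></div>
+   </div>
+
+   <div class="progress vertical">
+     <div class="progress-bar progress-bar-info" style="height: 40%"></div>
+   </div>
+*/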
+/* ********* /progressbar **************************** */
+/********* login ****************/
+
+a.hiddenanchor{
+ display: none;
+}
+/** The wrapper that will contain our two forms **/
+#wrapper{
+ right: 0;
+ margin: 0 auto;
+ margin-top: 5%;
+ max-width: 350px;
+ position: relative;
+}
+
+#register,
+#login{
+ position: absolute;
+ top: 0;
+ width: 100%;
+}
+#register{
+ z-index: 21;
+ opacity: 0;
+}
+#login{
+ z-index: 22;
+}
+#toregister:target ~ #wrapper #register,
+#tologin:target ~ #wrapper #login{
+ z-index: 22;
+ -webkit-animation-name: fadeInLeft;
+ -moz-animation-name: fadeInLeft;
+ -ms-animation-name: fadeInLeft;
+ -o-animation-name: fadeInLeft;
+ animation-name: fadeInLeft;
+ -webkit-animation-delay: .1s;
+ -moz-animation-delay: .1s;
+ -o-animation-delay: .1s;
+ -ms-animation-delay: .1s;
+ animation-delay: .1s;
+}
+#toregister:target ~ #wrapper #login,
+#tologin:target ~ #wrapper #register{
+ -webkit-animation-name: fadeOutLeft;
+ -moz-animation-name: fadeOutLeft;
+ -ms-animation-name: fadeOutLeft;
+ -o-animation-name: fadeOutLeft;
+ animation-name: fadeOutLeft;
+}
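+
+/* How the pure-CSS form switch above works (illustrative sketch of the
+ * expected markup): hidden anchors such as
+ * <a id="toregister" class="hiddenanchor"></a> sit before #wrapper, so
+ * following a link to #toregister makes :target match and the general
+ * sibling combinator (~) decides which form fades in or out. */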
+
+/** the actual animation, credit where due: http://daneden.me/animate/ ***/
+.animate{
+ -webkit-animation-duration: 0.5s;
+ -webkit-animation-timing-function: ease;
+ -webkit-animation-fill-mode: both;
+
+ -moz-animation-duration: 0.5s;
+ -moz-animation-timing-function: ease;
+ -moz-animation-fill-mode: both;
+
+ -o-animation-duration: 0.5s;
+ -o-animation-timing-function: ease;
+ -o-animation-fill-mode: both;
+
+ -ms-animation-duration: 0.5s;
+ -ms-animation-timing-function: ease;
+ -ms-animation-fill-mode: both;
+
+ animation-duration: 0.5s;
+ animation-timing-function: ease;
+ animation-fill-mode: both;
+}
+
+/********* /login ***************/
+/********** sign in ***************************/
+.login_box{ padding:20px; margin: auto;}
+a{ text-decoration: none; }
+a:hover{ text-decoration: underline; }
+.left { float:left; }
+.alignleft { float: left; margin-right: 15px; }
+.alignright { float: right; margin-left: 15px; }
+.clearfix:after,
+form:after {
+ content: ".";
+ display: block;
+ height: 0;
+ clear: both;
+ visibility: hidden;
+}
+
+.login_content {
+ margin: 0 auto;
+ padding: 25px 0 0;
+ position: relative;
+ text-align: center;
+ text-shadow: 0 1px 0 #fff;
+ min-width: 280px;
+}
+.login_content h1 {
+ font: normal 25px Helvetica, Arial, sans-serif;
+ letter-spacing: -0.05em;
+ line-height: 20px;
+ margin: 10px 0 30px;
+}
+.login_content h1:before,
+.login_content h1:after {
+ content: "";
+ height: 1px;
+ position: absolute;
+ top: 10px;
+ width: 20%;
+}
+.login_content h1:after {
+ background: rgb(126,126,126);
+ background: -moz-linear-gradient(left, rgba(126,126,126,1) 0%, rgba(255,255,255,1) 100%);
+ background: -webkit-linear-gradient(left, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ background: -o-linear-gradient(left, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ background: -ms-linear-gradient(left, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ background: linear-gradient(to right, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ right: 0;
+}
+.login_content h1:before {
+ background: rgb(126,126,126);
+ background: -moz-linear-gradient(right, rgba(126,126,126,1) 0%, rgba(255,255,255,1) 100%);
+ background: -webkit-linear-gradient(right, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ background: -o-linear-gradient(right, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ background: -ms-linear-gradient(right, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ background: linear-gradient(to left, rgba(126,126,126,1) 0%,rgba(255,255,255,1) 100%);
+ left: 0;
+}
+.login_content form { margin: 20px 0; position: relative }
+.login_content form input[type="text"],
+.login_content form input[type="email"],
+.login_content form input[type="password"] {
+ -webkit-border-radius: 3px;
+ -moz-border-radius: 3px;
+ -ms-border-radius: 3px;
+ -o-border-radius: 3px;
+ border-radius: 3px;
+ -webkit-box-shadow: 0 1px 0 #fff, 0 -2px 5px rgba(0,0,0,0.08) inset;
+ -moz-box-shadow: 0 1px 0 #fff, 0 -2px 5px rgba(0,0,0,0.08) inset;
+ -ms-box-shadow: 0 1px 0 #fff, 0 -2px 5px rgba(0,0,0,0.08) inset;
+ -o-box-shadow: 0 1px 0 #fff, 0 -2px 5px rgba(0,0,0,0.08) inset;
+ box-shadow: 0 1px 0 #fff, 0 -2px 5px rgba(0,0,0,0.08) inset;
+ -webkit-transition: all 0.5s ease;
+ -moz-transition: all 0.5s ease;
+ -ms-transition: all 0.5s ease;
+ -o-transition: all 0.5s ease;
+ transition: all 0.5s ease;
+ border: 1px solid #c8c8c8;
+ color: #777;
+ margin: 0 0 20px;
+ width: 100%;
+}
+.login_content form input[type="text"]:focus,
+.login_content form input[type="email"]:focus,
+.login_content form input[type="password"]:focus {
+ -webkit-box-shadow: 0 0 2px #AA77B4 inset;
+ -moz-box-shadow: 0 0 2px #ed1c24 inset;
+ -ms-box-shadow: 0 0 2px #ed1c24 inset;
+ -o-box-shadow: 0 0 2px #ed1c24 inset;
+ box-shadow: 0 0 2px #A97AAD inset;
+ background-color: #fff;
+ border: 1px solid #A878AF;
+ outline: none;
+}
+
+#username { background-position: 10px 10px !important; }
+#password { background-position: 10px -53px !important; }
+.login_content form div a {
+ font-size: 12px;
+ margin: 10px 15px 0 0;
+}
+.reset_pass{ margin-top: 10px !important; }
+.login_content div .reset_pass{ margin-top: 13px !important; margin-right: 39px; float: right; }
+.separator{
+ border-top: 1px solid #D8D8D8;
+ margin-top: 10px;
+ padding-top: 10px;
+}
+.button {
+ background: rgb(247,249,250);
+ background: -moz-linear-gradient(top, rgba(247,249,250,1) 0%, rgba(240,240,240,1) 100%);
+ background: -webkit-linear-gradient(top, rgba(247,249,250,1) 0%,rgba(240,240,240,1) 100%);
+ background: -o-linear-gradient(top, rgba(247,249,250,1) 0%,rgba(240,240,240,1) 100%);
+ background: -ms-linear-gradient(top, rgba(247,249,250,1) 0%,rgba(240,240,240,1) 100%);
+ background: linear-gradient(to bottom, rgba(247,249,250,1) 0%,rgba(240,240,240,1) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#f7f9fa', endColorstr='#f0f0f0',GradientType=0 );
+ -webkit-box-shadow: 0 1px 2px rgba(0,0,0,0.1) inset;
+ -moz-box-shadow: 0 1px 2px rgba(0,0,0,0.1) inset;
+ -ms-box-shadow: 0 1px 2px rgba(0,0,0,0.1) inset;
+ -o-box-shadow: 0 1px 2px rgba(0,0,0,0.1) inset;
+ box-shadow: 0 1px 2px rgba(0,0,0,0.1) inset;
+ -webkit-border-radius: 0 0 5px 5px;
+ -moz-border-radius: 0 0 5px 5px;
+ -o-border-radius: 0 0 5px 5px;
+ -ms-border-radius: 0 0 5px 5px;
+ border-radius: 0 0 5px 5px;
+ border-top: 1px solid #CFD5D9;
+ padding: 15px 0;
+}
+.login_content form input[type="submit"],#content form .submit{float: left; margin-left: 38px;}
+.button a {
+ background: url(http://cssdeck.com/uploads/media/items/8/8bcLQqF.png) 0 -112px no-repeat;
+ color: #7E7E7E;
+ font-size: 17px;
+ padding: 2px 0 2px 40px;
+ text-decoration: none;
+ -webkit-transition: all 0.3s ease;
+ -moz-transition: all 0.3s ease;
+ -ms-transition: all 0.3s ease;
+ -o-transition: all 0.3s ease;
+ transition: all 0.3s ease;
+}
+.button a:hover {
+ background-position: 0 -135px;
+ color: #00aeef;
+}
+
+header{width:100%;}
+/********** end of sign in ********************/
+/* ********* start nprogress **************************** */
+/* Make clicks pass-through */
+#nprogress {
+ pointer-events: none;
+}
+
+#nprogress .bar {
+ background: #29d;
+
+ position: fixed;
+ z-index: 1031;
+ top: 0;
+ left: 0;
+
+ width: 100%;
+ height: 2px;
+}
+
+/* Fancy blur effect */
+#nprogress .peg {
+ display: block;
+ position: absolute;
+ right: 0px;
+ width: 100px;
+ height: 100%;
+ box-shadow: 0 0 10px #29d, 0 0 5px #29d;
+ opacity: 1.0;
+
+ -webkit-transform: rotate(3deg) translate(0px, -4px);
+ -ms-transform: rotate(3deg) translate(0px, -4px);
+ transform: rotate(3deg) translate(0px, -4px);
+}
+
+/* Remove these to get rid of the spinner */
+#nprogress .spinner {
+ display: block;
+ position: fixed;
+ z-index: 1031;
+ top: 15px;
+ right: 15px;
+}
+
+#nprogress .spinner-icon {
+ width: 18px;
+ height: 18px;
+ box-sizing: border-box;
+
+ border: solid 2px transparent;
+ border-top-color: #29d;
+ border-left-color: #29d;
+ border-radius: 50%;
+
+ -webkit-animation: nprogress-spinner 400ms linear infinite;
+ animation: nprogress-spinner 400ms linear infinite;
+}
+
+.nprogress-custom-parent {
+ overflow: hidden;
+ position: relative;
+}
+
+.nprogress-custom-parent #nprogress .spinner,
+.nprogress-custom-parent #nprogress .bar {
+ position: absolute;
+}
+
+@-webkit-keyframes nprogress-spinner {
+ 0% { -webkit-transform: rotate(0deg); }
+ 100% { -webkit-transform: rotate(360deg); }
+}
+@keyframes nprogress-spinner {
+ 0% { transform: rotate(0deg); }
+ 100% { transform: rotate(360deg); }
+}
+
+
+/* ********* end nprogress **************************** */
+/* ********* cropping **************************** */
+/* Main
+ * ========================================================================== */
+
+
+/* Icons
+ * -------------------------------------------------------------------------- */
+
+.icon {
+ display: inline-block;
+ width: 20px;
+ height: 20px;
+ background-image: url("../images/icons.png");
+ vertical-align: middle;
+}
+
+.icon-move {
+ background-position: 0 0;
+}
+
+.icon-crop {
+ background-position: -30px 0;
+}
+
+.icon-zoom-in {
+ background-position: -60px 0;
+}
+
+.icon-zoom-out {
+ background-position: -90px 0;
+}
+
+.icon-rotate-left {
+ background-position: -120px 0;
+}
+
+.icon-rotate-right {
+ background-position: -150px 0;
+}
+
+.icon-lock {
+ background-position: -180px 0;
+}
+
+.icon-unlock {
+ background-position: -210px 0;
+}
+
+.icon-remove {
+ background-position: -240px 0;
+}
+
+.icon-refresh {
+ background-position: -270px 0;
+}
+
+.icon-upload {
+ background-position: -300px 0;
+}
+
+.icon-off {
+ background-position: -330px 0;
+}
+
+.icon-info {
+ background-position: -360px 0;
+}
+
+
+/* Alerts
+ * -------------------------------------------------------------------------- */
+
+.docs-alert {
+ display: none;
+ position: fixed;
+ top: 20px;
+ left: 0;
+ right: 0;
+ height: 0;
+ text-align: center;
+ opacity: 0.9;
+}
+
+.docs-alert .message {
+ display: inline-block;
+ padding: 5px 10px;
+ border-radius: 2px;
+ background-color: #aaa;
+ color: #fff;
+}
+
+.docs-alert .primary {
+ background-color: #0074d9;
+}
+
+.docs-alert .success {
+ background-color: #2ecc40;
+}
+
+.docs-alert .info {
+ background-color: #39cccc;
+}
+
+.docs-alert .warning {
+ background-color: #ff851b;
+}
+
+.docs-alert .danger {
+ background-color: #ff4136;
+}
+
+/* Button
+ * -------------------------------------------------------------------------- */
+
+/* Basic style
+ * -------------------------------------------------------------------------- */
+
+body {
+ overflow-x: hidden;
+}
+
+
+/* Header */
+
+.docs-header {
+ border-color: #003973;
+ background-color: #00468c;
+ color: #fff;
+}
+
+.docs-header .navbar-brand {
+ color: #eee;
+}
+
+.docs-header .navbar-toggle {
+ border-color: #003973;
+ background-color: #00468c;
+}
+
+.docs-header .navbar-toggle:hover,
+.docs-header .navbar-toggle:focus {
+ border-color: #003366;
+ background-color: #003973;
+}
+
+.docs-header .navbar-collapse {
+ border-color: #003973;
+}
+
+.docs-header .navbar-text {
+ color: #ddd;
+}
+
+.docs-header .navbar-nav > li > a {
+ color: #eee;
+}
+
+
+/* Content */
+
+.img-container,
+.img-preview {
+ background-color: #f7f7f7;
+ overflow: hidden;
+ width: 100%;
+ text-align: center;
+}
+
+.img-container {
+ min-height: 200px;
+ max-height: 466px;
+ margin-bottom: 20px;
+}
+
+.img-container > img {
+ max-width: 100%;
+}
+
+.docs-preview {
+ margin-right: -15px;
+ margin-bottom: 10px;
+}
+
+.img-preview {
+ float: left;
+ margin-right: 10px;
+ margin-bottom: 10px;
+}
+
+.img-preview > img {
+ max-width: 100%;
+}
+
+.preview-lg {
+ width: 263px;
+ height: 148px;
+}
+
+.preview-md {
+ width: 139px;
+ height: 78px;
+}
+
+.preview-sm {
+ width: 69px;
+ height: 39px;
+}
+
+.preview-xs {
+ width: 35px;
+ height: 20px;
+ margin-right: 0;
+}
+
+.docs-data > .input-group {
+ margin-bottom: 10px;
+}
+
+.docs-data > .input-group > label {
+ min-width: 80px;
+}
+
+.docs-data > .input-group > span {
+ min-width: 50px;
+}
+
+.docs-buttons > .btn,
+.docs-buttons > .btn-group,
+.docs-buttons > .form-control {
+ margin-right: 5px;
+ margin-bottom: 10px;
+}
+
+.docs-toggles > .btn,
+.docs-toggles > .btn-group,
+.docs-toggles > .dropdown {
+ margin-bottom: 10px;
+}
+
+.docs-tooltip {
+ display: block;
+ margin: -6px -12px;
+ padding: 6px 12px;
+}
+
+.docs-tooltip > .icon {
+ margin: 0 -3px;
+ vertical-align: top;
+}
+
+.tooltip-inner {
+ white-space: normal;
+}
+
+.btn-upload .tooltip-inner {
+ white-space: nowrap;
+}
+
+@media (max-width: 400px) {
+ .btn-group-crop {
+ margin-right: -15px !important;
+ }
+
+ .btn-group-crop > .btn {
+ padding-left: 5px;
+ padding-right: 5px;
+ }
+
+ .btn-group-crop .docs-tooltip {
+ margin-left: -5px;
+ margin-right: -5px;
+ padding-left: 5px;
+ padding-right: 5px;
+ }
+}
+
+.docs-options .dropdown-menu {
+ width: 100%;
+}
+
+.docs-options .dropdown-menu > li {
+ padding: 3px 20px;
+}
+
+.docs-options .dropdown-menu > li:hover {
+ background-color: #f7f7f7;
+}
+
+.docs-options .dropdown-menu > li > label {
+ display: block;
+}
+
+.docs-cropped .modal-body {
+ text-align: center;
+}
+
+.docs-cropped .modal-body > img {
+ max-width: 100%;
+}
+
+/* ********* /cropping **************************** */
+
+ul.notifications {
+ float: right;
+ display: block;
+ margin-bottom: 7px;
+ padding: 0;
+ width: 100%;
+}
+
+.notifications li {
+ float: right;
+ margin: 3px;
+ width: 36px;
+ box-shadow: 3px 3px 3px rgba(0, 0, 0, 0.3);
+}
+
+.notifications li:last-child {
+ margin-left: 0;
+}
+
+.notifications a {
+ display: block;
+ text-align: center;
+ text-decoration: none;
+ text-transform: uppercase;
+ padding: 9px 8px;
+}
+
+.tabbed_notifications .text {
+ padding: 5px 15px;
+ height: 140px;
+ border-radius: 7px;
+ box-shadow: 6px 6px 6px rgba(0, 0, 0, 0.3);
+}
+
+.tabbed_notifications div p {
+ display: inline-block;
+}
+
+.tabbed_notifications h2 {
+ font-weight: bold;
+ text-transform: uppercase;
+ width: 80%;
+ float: left;
+ height: 20px;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ display: block;
+}
+
+.tabbed_notifications .close {
+ padding: 5px;
+ color: #E9EDEF;
+ float: right;
+ opacity: 1;
+}
+
+/*pace loader*/
+/*http://github.hubspot.com/pace/docs/welcome/*/
+.pace {
+ -webkit-pointer-events: none;
+ pointer-events: none;
+
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ user-select: none;
+}
+
+.pace-inactive {
+ display: none;
+}
+
+.pace .pace-progress {
+ background: #1abb9c;
+ position: fixed;
+ z-index: 2000;
+ top: 0;
+ right: 100%;
+ width: 100%;
+ height: 2px;
+}
+.copyright-info {
+ padding: 8px 0;
+}
diff --git a/asset/static/css/datatables/css/demo_page.css b/asset/static/css/datatables/css/demo_page.css
new file mode 100755
index 0000000..935ed24
--- /dev/null
+++ b/asset/static/css/datatables/css/demo_page.css
@@ -0,0 +1,127 @@
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * General page setup
+ */
+#dt_example {
+ font: 80%/1.45em "Lucida Grande", Verdana, Arial, Helvetica, sans-serif;
+ margin: 0;
+ padding: 0;
+ color: #333;
+ background-color: #fff;
+}
+
+
+#dt_example #container {
+ width: 800px;
+ margin: 30px auto;
+ padding: 0;
+}
+
+
+#dt_example #footer {
+ margin: 50px auto 0 auto;
+ padding: 0;
+}
+
+#dt_example #demo {
+ margin: 30px auto 0 auto;
+}
+
+#dt_example .demo_jui {
+ margin: 30px auto 0 auto;
+}
+
+#dt_example .big {
+ font-size: 1.3em;
+ font-weight: bold;
+ line-height: 1.6em;
+ color: #4E6CA3;
+}
+
+#dt_example .spacer {
+ height: 20px;
+ clear: both;
+}
+
+#dt_example .clear {
+ clear: both;
+}
+
+#dt_example pre {
+ padding: 15px;
+ background-color: #F5F5F5;
+ border: 1px solid #CCCCCC;
+}
+
+#dt_example h1 {
+ margin-top: 2em;
+ font-size: 1.3em;
+ font-weight: normal;
+ line-height: 1.6em;
+ color: #4E6CA3;
+ border-bottom: 1px solid #B0BED9;
+ clear: both;
+}
+
+#dt_example h2 {
+ font-size: 1.2em;
+ font-weight: normal;
+ line-height: 1.6em;
+ color: #4E6CA3;
+ clear: both;
+}
+
+#dt_example a {
+ color: #0063DC;
+ text-decoration: none;
+}
+
+#dt_example a:hover {
+ text-decoration: underline;
+}
+
+#dt_example ul {
+ color: #4E6CA3;
+}
+
+.css_right {
+ float: right;
+}
+
+.css_left {
+ float: left;
+}
+
+.demo_links {
+ float: left;
+ width: 50%;
+ margin-bottom: 1em;
+}
+
+#demo_info {
+ padding: 5px;
+ border: 1px solid #B0BED9;
+ height: 100px;
+ width: 100%;
+ overflow: auto;
+}
+
+#dt_example code {
+ font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
+ padding: 2px 4px !important;
+ white-space: nowrap;
+ font-size: 0.9em;
+
+ color: #D14;
+ background-color: #F7F7F9;
+
+ border: 1px solid #E1E1E8;
+ -webkit-border-radius: 3px;
+ -moz-border-radius: 3px;
+ border-radius: 3px;
+}
+
+#dt_example div.syntaxhighlighter code {
+ padding: 0 !important;
+}
+
diff --git a/asset/static/css/datatables/css/demo_table.css b/asset/static/css/datatables/css/demo_table.css
new file mode 100755
index 0000000..8b7b7d9
--- /dev/null
+++ b/asset/static/css/datatables/css/demo_table.css
@@ -0,0 +1,258 @@
+.dataTables_wrapper {
+ position: relative;
+ clear: both;
+ zoom: 1; /* Feeling sorry for IE */
+}
+
+.dataTables_processing {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ width: 250px;
+ height: 30px;
+ margin-left: -125px;
+ margin-top: -15px;
+ padding: 14px 0 2px 0;
+ border: 1px solid #ddd;
+ text-align: center;
+ color: #999;
+ font-size: 14px;
+ background-color: white;
+}
+
+.dataTables_length {
+ width: 40%;
+ float: left;
+}
+
+.dataTables_filter {
+ width: 50%;
+ float: right;
+ text-align: right;
+}
+
+.dataTables_info {
+ width: 60%;
+ float: left;
+}
+
+.dataTables_paginate {
+ float: right;
+ text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous, .paginate_enabled_previous,
+.paginate_disabled_next, .paginate_enabled_next {
+ height: 19px;
+ float: left;
+ cursor: pointer;
+ *cursor: hand;
+ color: #111 !important;
+}
+.paginate_disabled_previous:hover, .paginate_enabled_previous:hover,
+.paginate_disabled_next:hover, .paginate_enabled_next:hover {
+ text-decoration: none !important;
+}
+.paginate_disabled_previous:active, .paginate_enabled_previous:active,
+.paginate_disabled_next:active, .paginate_enabled_next:active {
+ outline: none;
+}
+
+.paginate_disabled_previous,
+.paginate_disabled_next {
+ color: #666 !important;
+}
+.paginate_disabled_previous, .paginate_enabled_previous {
+ padding-left: 23px;
+}
+.paginate_disabled_next, .paginate_enabled_next {
+ padding-right: 23px;
+ margin-left: 10px;
+}
+
+.paginate_disabled_previous {
+ background: url('../images/back_disabled.png') no-repeat top left;
+}
+
+.paginate_enabled_previous {
+ background: url('../images/back_enabled.png') no-repeat top left;
+}
+.paginate_enabled_previous:hover {
+ background: url('../images/back_enabled_hover.png') no-repeat top left;
+}
+
+.paginate_disabled_next {
+ background: url('../images/forward_disabled.png') no-repeat top right;
+}
+
+.paginate_enabled_next {
+ background: url('../images/forward_enabled.png') no-repeat top right;
+}
+.paginate_enabled_next:hover {
+ background: url('../images/forward_enabled_hover.png') no-repeat top right;
+}
+table.display {
+ margin: 0 auto;
+ clear: both;
+ width: 100%;
+}
+
+table.display thead th {
+ padding: 8px 18px 8px 10px;
+ border-bottom: 1px solid black;
+ font-weight: bold;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+table.display tfoot th {
+ padding: 3px 18px 3px 10px;
+ border-top: 1px solid black;
+ font-weight: bold;
+}
+
+table.display tr.heading2 td {
+ border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+ padding: 3px 10px;
+}
+
+table.display td.center {
+ text-align: center;
+}
+
+.sorting_asc {
+ background: url('../images/sort_asc.png') no-repeat center right;
+}
+
+.sorting_desc {
+ background: url('../images/sort_desc.png') no-repeat center right;
+}
+
+.sorting {
+ background: url('../images/sort_both.png') no-repeat center right;
+}
+
+.sorting_asc_disabled {
+ background: url('../images/sort_asc_disabled.png') no-repeat center right;
+}
+
+.sorting_desc_disabled {
+ background: url('../images/sort_desc_disabled.png') no-repeat center right;
+}
+
+table.display thead th:active,
+table.display thead td:active {
+ outline: none;
+}
+.dataTables_scroll {
+ clear: both;
+}
+
+.dataTables_scrollBody {
+ *margin-top: -1px;
+ -webkit-overflow-scrolling: touch;
+}
+
+.top, .bottom {
+ background-color: #F5F5F5;
+ border: 1px solid #CCCCCC;
+}
+
+.top .dataTables_info {
+ float: none;
+}
+
+.clear {
+ clear: both;
+}
+
+.dataTables_empty {
+ text-align: center;
+}
+
+tfoot input {
+ margin: 0.5em 0;
+ width: 100%;
+ color: #444;
+}
+
+tfoot input.search_init {
+ color: #999;
+}
+
+td.group {
+ background-color: #d1cfd0;
+ border-bottom: 2px solid #A19B9E;
+ border-top: 2px solid #A19B9E;
+}
+
+td.details {
+ background-color: #d1cfd0;
+ border: 2px solid #A19B9E;
+}
+
+
+.example_alt_pagination div.dataTables_info {
+ width: 40%;
+}
+
+.paging_full_numbers {
+ width: 400px;
+ height: 22px;
+ line-height: 22px;
+}
+
+.paging_full_numbers a:active {
+ outline: none;
+}
+
+.paging_full_numbers a:hover {
+ text-decoration: none;
+}
+
+.paging_full_numbers a.paginate_button,
+.paging_full_numbers a.paginate_active {
+ border: 1px solid #aaa;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+ padding: 2px 5px;
+ margin: 0 3px;
+ cursor: pointer;
+ *cursor: hand;
+ color: #333 !important;
+}
+
+.paging_full_numbers a.paginate_button {
+ background-color: #ddd;
+}
+
+.paging_full_numbers a.paginate_button:hover {
+ background-color: #ccc;
+ text-decoration: none !important;
+}
+
+.paging_full_numbers a.paginate_active {
+ background-color: #99B3FF;
+}
+
+table.display tr.even.row_selected td {
+ background-color: #B0BED9;
+}
+
+table.display tr.odd.row_selected td {
+ background-color: #9FAFD1;
+}
+
+
+div.box {
+ height: 100px;
+ padding: 10px;
+ overflow: auto;
+ border: 1px solid #8080FF;
+ background-color: #E5E5FF;
+}
diff --git a/asset/static/css/datatables/css/demo_table_jui.css b/asset/static/css/datatables/css/demo_table_jui.css
new file mode 100755
index 0000000..a210af5
--- /dev/null
+++ b/asset/static/css/datatables/css/demo_table_jui.css
@@ -0,0 +1,501 @@
+/*
+ * File: demo_table_jui.css
+ * CVS: $Id$
+ * Description: CSS descriptions for DataTables demo pages
+ * Author: Allan Jardine
+ * Created: Tue May 12 06:47:22 BST 2009
+ * Modified: $Date$ by $Author$
+ * Language: CSS
+ * Project: DataTables
+ *
+ * Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***************************************************************************
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ * 'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
+ * no conflict between the two pagination types. If you want to use full_numbers pagination
+ * ensure that you either have "example_alt_pagination" as a body class name, or better yet,
+ * modify that selector.
+ * Note that the path used for Images is relative. All images are by default located in
+ * ../images/ - relative to this CSS file.
+ */
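+
+/* For example (illustrative only): a page opting into the alternative
+ * pagination demo adds the class on the body element, e.g.
+ * <body class="example_alt_pagination">, which is what activates rules such
+ * as .example_alt_pagination div.dataTables_info further down. */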
+
+
+/*
+ * jQuery UI specific styling
+ */
+
+.paging_two_button .ui-button {
+ float: left;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+.paging_full_numbers .ui-button {
+ padding: 2px 6px;
+ margin: 0;
+ cursor: pointer;
+ *cursor: hand;
+ color: #333 !important;
+}
+
+.dataTables_paginate .ui-button {
+ margin-right: -0.1em !important;
+}
+
+.paging_full_numbers {
+ width: 350px !important;
+}
+
+.dataTables_wrapper .ui-toolbar {
+ padding: 5px;
+}
+
+.dataTables_paginate {
+ width: auto;
+}
+
+.dataTables_info {
+ padding-top: 3px;
+}
+
+table.display thead th {
+ padding: 3px 0px 3px 10px;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+div.dataTables_wrapper .ui-widget-header {
+ font-weight: normal;
+}
+
+
+/*
+ * Sort arrow icon positioning
+ */
+table.display thead th div.DataTables_sort_wrapper {
+ position: relative;
+ padding-right: 20px;
+}
+
+table.display thead th div.DataTables_sort_wrapper span {
+ position: absolute;
+ top: 50%;
+ margin-top: -8px;
+ right: 0;
+}
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Everything below this line is the same as demo_table.css. This file is
+ * required for 'cleanliness' of the markup
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+ position: relative;
+ clear: both;
+}
+
+.dataTables_processing {
+ position: absolute;
+ top: 0px;
+ left: 50%;
+ width: 250px;
+ margin-left: -125px;
+ border: 1px solid #ddd;
+ text-align: center;
+ color: #999;
+ font-size: 11px;
+ padding: 2px 0;
+}
+
+.dataTables_length {
+ width: 40%;
+ float: left;
+}
+
+.dataTables_filter {
+ width: 50%;
+ float: right;
+ text-align: right;
+}
+
+.dataTables_info {
+ width: 50%;
+ float: left;
+}
+
+.dataTables_paginate {
+ float: right;
+ text-align: right;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+ margin: 0 auto;
+ width: 100%;
+ clear: both;
+ border-collapse: collapse;
+}
+
+table.display tfoot th {
+ padding: 3px 0px 3px 10px;
+ font-weight: normal;
+}
+
+table.display tr.heading2 td {
+ border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+ padding: 3px 10px;
+}
+
+table.display td.center {
+ text-align: center;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+ background: url('../images/sort_asc.png') no-repeat center right;
+}
+
+.sorting_desc {
+ background: url('../images/sort_desc.png') no-repeat center right;
+}
+
+.sorting {
+ background: url('../images/sort_both.png') no-repeat center right;
+}
+
+.sorting_asc_disabled {
+ background: url('../images/sort_asc_disabled.png') no-repeat center right;
+}
+
+.sorting_desc_disabled {
+ background: url('../images/sort_desc_disabled.png') no-repeat center right;
+}
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables row classes
+ */
+table.display tr.odd.gradeA {
+ background-color: #ddffdd;
+}
+
+table.display tr.even.gradeA {
+ background-color: #eeffee;
+}
+
+table.display tr.odd.gradeC {
+ background-color: #ddddff;
+}
+
+table.display tr.even.gradeC {
+ background-color: #eeeeff;
+}
+
+table.display tr.odd.gradeX {
+ background-color: #ffdddd;
+}
+
+table.display tr.even.gradeX {
+ background-color: #ffeeee;
+}
+
+table.display tr.odd.gradeU {
+ background-color: #ddd;
+}
+
+table.display tr.even.gradeU {
+ background-color: #eee;
+}
+
+
+tr.odd {
+ background-color: #E2E4FF;
+}
+
+tr.even {
+ background-color: white;
+}
+
+
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Misc
+ */
+.dataTables_scroll {
+ clear: both;
+}
+
+.dataTables_scrollBody {
+ -webkit-overflow-scrolling: touch;
+}
+
+.top, .bottom {
+ padding: 15px;
+ background-color: #F5F5F5;
+ border: 1px solid #CCCCCC;
+}
+
+.top .dataTables_info {
+ float: none;
+}
+
+.clear {
+ clear: both;
+}
+
+.dataTables_empty {
+ text-align: center;
+}
+
+tfoot input {
+ margin: 0.5em 0;
+ width: 100%;
+ color: #444;
+}
+
+tfoot input.search_init {
+ color: #999;
+}
+
+td.group {
+ background-color: #d1cfd0;
+ border-bottom: 2px solid #A19B9E;
+ border-top: 2px solid #A19B9E;
+}
+
+td.details {
+ background-color: #d1cfd0;
+ border: 2px solid #A19B9E;
+}
+
+
+.example_alt_pagination div.dataTables_info {
+ width: 40%;
+}
+
+.paging_full_numbers a.paginate_button,
+.paging_full_numbers a.paginate_active {
+ border: 1px solid #aaa;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+ padding: 2px 5px;
+ margin: 0 3px;
+ cursor: pointer;
+ *cursor: hand;
+ color: #333 !important;
+}
+
+.paging_full_numbers a.paginate_button {
+ background-color: #ddd;
+}
+
+.paging_full_numbers a.paginate_button:hover {
+ background-color: #ccc;
+ text-decoration: none !important;
+}
+
+.paging_full_numbers a.paginate_active {
+ background-color: #99B3FF;
+}
+
+table.display tr.even.row_selected td {
+ background-color: #B0BED9;
+}
+
+table.display tr.odd.row_selected td {
+ background-color: #9FAFD1;
+}
+
+
+/*
+ * Sorting classes for columns
+ */
+/* For the standard odd/even */
+tr.odd td.sorting_1 {
+ background-color: #D3D6FF;
+}
+
+tr.odd td.sorting_2 {
+ background-color: #DADCFF;
+}
+
+tr.odd td.sorting_3 {
+ background-color: #E0E2FF;
+}
+
+tr.even td.sorting_1 {
+ background-color: #EAEBFF;
+}
+
+tr.even td.sorting_2 {
+ background-color: #F2F3FF;
+}
+
+tr.even td.sorting_3 {
+ background-color: #F9F9FF;
+}
+
+
+/* For the Conditional-CSS grading rows */
+/*
+ Colour calculations (based off the main row colours)
+ Level 1:
+ dd > c4
+ ee > d5
+ Level 2:
+ dd > d1
+ ee > e2
+ */
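+/* Worked example: odd gradeA rows are #ddffdd, so the level-1 mapping
+ * dd -> c4 on the non-ff channels gives the #c4ffc4 used for their primary
+ * sorted column below. */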
+tr.odd.gradeA td.sorting_1 {
+ background-color: #c4ffc4;
+}
+
+tr.odd.gradeA td.sorting_2 {
+ background-color: #d1ffd1;
+}
+
+tr.odd.gradeA td.sorting_3 {
+ background-color: #d1ffd1;
+}
+
+tr.even.gradeA td.sorting_1 {
+ background-color: #d5ffd5;
+}
+
+tr.even.gradeA td.sorting_2 {
+ background-color: #e2ffe2;
+}
+
+tr.even.gradeA td.sorting_3 {
+ background-color: #e2ffe2;
+}
+
+tr.odd.gradeC td.sorting_1 {
+ background-color: #c4c4ff;
+}
+
+tr.odd.gradeC td.sorting_2 {
+ background-color: #d1d1ff;
+}
+
+tr.odd.gradeC td.sorting_3 {
+ background-color: #d1d1ff;
+}
+
+tr.even.gradeC td.sorting_1 {
+ background-color: #d5d5ff;
+}
+
+tr.even.gradeC td.sorting_2 {
+ background-color: #e2e2ff;
+}
+
+tr.even.gradeC td.sorting_3 {
+ background-color: #e2e2ff;
+}
+
+tr.odd.gradeX td.sorting_1 {
+ background-color: #ffc4c4;
+}
+
+tr.odd.gradeX td.sorting_2 {
+ background-color: #ffd1d1;
+}
+
+tr.odd.gradeX td.sorting_3 {
+ background-color: #ffd1d1;
+}
+
+tr.even.gradeX td.sorting_1 {
+ background-color: #ffd5d5;
+}
+
+tr.even.gradeX td.sorting_2 {
+ background-color: #ffe2e2;
+}
+
+tr.even.gradeX td.sorting_3 {
+ background-color: #ffe2e2;
+}
+
+tr.odd.gradeU td.sorting_1 {
+ background-color: #c4c4c4;
+}
+
+tr.odd.gradeU td.sorting_2 {
+ background-color: #d1d1d1;
+}
+
+tr.odd.gradeU td.sorting_3 {
+ background-color: #d1d1d1;
+}
+
+tr.even.gradeU td.sorting_1 {
+ background-color: #d5d5d5;
+}
+
+tr.even.gradeU td.sorting_2 {
+ background-color: #e2e2e2;
+}
+
+tr.even.gradeU td.sorting_3 {
+ background-color: #e2e2e2;
+}
+
+
+/*
+ * Row highlighting example
+ */
+.ex_highlight #example tbody tr.even:hover, #example tbody tr.even td.highlighted {
+ background-color: #ECFFB3;
+}
+
+.ex_highlight #example tbody tr.odd:hover, #example tbody tr.odd td.highlighted {
+ background-color: #E6FF99;
+}
\ No newline at end of file
diff --git a/asset/static/css/datatables/css/jquery.dataTables.css b/asset/static/css/datatables/css/jquery.dataTables.css
new file mode 100755
index 0000000..7da7fae
--- /dev/null
+++ b/asset/static/css/datatables/css/jquery.dataTables.css
@@ -0,0 +1,221 @@
+
+/*
+ * Table
+ */
+table.dataTable {
+ margin: 0 auto;
+ clear: both;
+ width: 100%;
+}
+
+table.dataTable thead th {
+ padding: 3px 18px 3px 10px;
+ border-bottom: 1px solid black;
+ font-weight: bold;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+table.dataTable tfoot th {
+ padding: 3px 18px 3px 10px;
+ border-top: 1px solid black;
+ font-weight: bold;
+}
+
+table.dataTable td {
+ padding: 3px 10px;
+}
+
+table.dataTable td.center,
+table.dataTable td.dataTables_empty {
+ text-align: center;
+}
+
+table.dataTable tr.odd { background-color: #E2E4FF; }
+table.dataTable tr.even { background-color: white; }
+
+table.dataTable tr.odd td.sorting_1 { background-color: #D3D6FF; }
+table.dataTable tr.odd td.sorting_2 { background-color: #DADCFF; }
+table.dataTable tr.odd td.sorting_3 { background-color: #E0E2FF; }
+table.dataTable tr.even td.sorting_1 { background-color: #EAEBFF; }
+table.dataTable tr.even td.sorting_2 { background-color: #F2F3FF; }
+table.dataTable tr.even td.sorting_3 { background-color: #F9F9FF; }
+
+
+/*
+ * Table wrapper
+ */
+.dataTables_wrapper {
+ position: relative;
+ clear: both;
+ *zoom: 1;
+}
+
+
+/*
+ * Page length menu
+ */
+.dataTables_length {
+ float: left;
+}
+
+
+/*
+ * Filter
+ */
+.dataTables_filter {
+ float: right;
+ text-align: right;
+}
+
+
+/*
+ * Table information
+ */
+.dataTables_info {
+ clear: both;
+ float: left;
+}
+
+
+/*
+ * Pagination
+ */
+.dataTables_paginate {
+ float: right;
+ text-align: right;
+}
+
+/* Two button pagination - previous / next */
+.paginate_disabled_previous,
+.paginate_enabled_previous,
+.paginate_disabled_next,
+.paginate_enabled_next {
+ height: 19px;
+ float: left;
+ cursor: pointer;
+ *cursor: hand;
+ color: #111 !important;
+}
+.paginate_disabled_previous:hover,
+.paginate_enabled_previous:hover,
+.paginate_disabled_next:hover,
+.paginate_enabled_next:hover {
+ text-decoration: none !important;
+}
+.paginate_disabled_previous:active,
+.paginate_enabled_previous:active,
+.paginate_disabled_next:active,
+.paginate_enabled_next:active {
+ outline: none;
+}
+
+.paginate_disabled_previous,
+.paginate_disabled_next {
+ color: #666 !important;
+}
+.paginate_disabled_previous,
+.paginate_enabled_previous {
+ padding-left: 23px;
+}
+.paginate_disabled_next,
+.paginate_enabled_next {
+ padding-right: 23px;
+ margin-left: 10px;
+}
+
+.paginate_enabled_previous { background: url('../images/back_enabled.png') no-repeat top left; }
+.paginate_enabled_previous:hover { background: url('../images/back_enabled_hover.png') no-repeat top left; }
+.paginate_disabled_previous { background: url('../images/back_disabled.png') no-repeat top left; }
+
+.paginate_enabled_next { background: url('../images/forward_enabled.png') no-repeat top right; }
+.paginate_enabled_next:hover { background: url('../images/forward_enabled_hover.png') no-repeat top right; }
+.paginate_disabled_next { background: url('../images/forward_disabled.png') no-repeat top right; }
+
+/* Full number pagination */
+.paging_full_numbers {
+ height: 22px;
+ line-height: 22px;
+}
+.paging_full_numbers a:active {
+ outline: none;
+}
+.paging_full_numbers a:hover {
+ text-decoration: none;
+}
+
+.paging_full_numbers a.paginate_button,
+.paging_full_numbers a.paginate_active {
+ border: 1px solid #aaa;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+ padding: 2px 5px;
+ margin: 0 3px;
+ cursor: pointer;
+ *cursor: hand;
+ color: #333 !important;
+}
+
+.paging_full_numbers a.paginate_button {
+ background-color: #ddd;
+}
+
+.paging_full_numbers a.paginate_button:hover {
+ background-color: #ccc;
+ text-decoration: none !important;
+}
+
+.paging_full_numbers a.paginate_active {
+ background-color: #99B3FF;
+}
+
+
+/*
+ * Processing indicator
+ */
+.dataTables_processing {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ width: 250px;
+ height: 30px;
+ margin-left: -125px;
+ margin-top: -15px;
+ padding: 14px 0 2px 0;
+ border: 1px solid #ddd;
+ text-align: center;
+ color: #999;
+ font-size: 14px;
+ background-color: white;
+}
+
+
+/*
+ * Sorting
+ */
+.sorting { background: url('../images/sort_both.png') no-repeat center right; }
+.sorting_asc { background: url('../images/sort_asc.png') no-repeat center right; }
+.sorting_desc { background: url('../images/sort_desc.png') no-repeat center right; }
+
+.sorting_asc_disabled { background: url('../images/sort_asc_disabled.png') no-repeat center right; }
+.sorting_desc_disabled { background: url('../images/sort_desc_disabled.png') no-repeat center right; }
+
+table.dataTable thead th:active,
+table.dataTable thead td:active {
+ outline: none;
+}
+
+
+/*
+ * Scrolling
+ */
+.dataTables_scroll {
+ clear: both;
+}
+
+.dataTables_scrollBody {
+ *margin-top: -1px;
+ -webkit-overflow-scrolling: touch;
+}
+
diff --git a/asset/static/css/datatables/css/jquery.dataTables_themeroller.css b/asset/static/css/datatables/css/jquery.dataTables_themeroller.css
new file mode 100755
index 0000000..cf1d4ed
--- /dev/null
+++ b/asset/static/css/datatables/css/jquery.dataTables_themeroller.css
@@ -0,0 +1,244 @@
+
+
+/*
+ * Table
+ */
+table.dataTable {
+ margin: 0 auto;
+ clear: both;
+ width: 100%;
+ border-collapse: collapse;
+}
+
+table.dataTable thead th {
+ padding: 3px 0px 3px 10px;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+table.dataTable tfoot th {
+ padding: 3px 10px;
+}
+
+table.dataTable td {
+ padding: 3px 10px;
+}
+
+table.dataTable td.center,
+table.dataTable td.dataTables_empty {
+ text-align: center;
+}
+
+table.dataTable tr.odd { background-color: #E2E4FF; }
+table.dataTable tr.even { background-color: white; }
+
+table.dataTable tr.odd td.sorting_1 { background-color: #D3D6FF; }
+table.dataTable tr.odd td.sorting_2 { background-color: #DADCFF; }
+table.dataTable tr.odd td.sorting_3 { background-color: #E0E2FF; }
+table.dataTable tr.even td.sorting_1 { background-color: #EAEBFF; }
+table.dataTable tr.even td.sorting_2 { background-color: #F2F3FF; }
+table.dataTable tr.even td.sorting_3 { background-color: #F9F9FF; }
+
+
+/*
+ * Table wrapper
+ */
+.dataTables_wrapper {
+ position: relative;
+ clear: both;
+ *zoom: 1;
+}
+.dataTables_wrapper .ui-widget-header {
+ font-weight: normal;
+}
+.dataTables_wrapper .ui-toolbar {
+ padding: 5px;
+}
+
+
+/*
+ * Page length menu
+ */
+.dataTables_length {
+ float: left;
+}
+
+
+/*
+ * Filter
+ */
+.dataTables_filter {
+ float: right;
+ text-align: right;
+}
+
+
+/*
+ * Table information
+ */
+.dataTables_info {
+ padding-top: 3px;
+ clear: both;
+ float: left;
+}
+
+
+/*
+ * Pagination
+ */
+.dataTables_paginate {
+ float: right;
+ text-align: right;
+}
+
+.dataTables_paginate .ui-button {
+ margin-right: -0.1em !important;
+}
+
+.paging_two_button .ui-button {
+ float: left;
+ cursor: pointer;
+ *cursor: hand;
+}
+
+.paging_full_numbers .ui-button {
+ padding: 2px 6px;
+ margin: 0;
+ cursor: pointer;
+ *cursor: hand;
+ color: #333 !important;
+}
+
+/* Two button pagination - previous / next */
+.paginate_disabled_previous,
+.paginate_enabled_previous,
+.paginate_disabled_next,
+.paginate_enabled_next {
+ height: 19px;
+ float: left;
+ cursor: pointer;
+ *cursor: hand;
+ color: #111 !important;
+}
+.paginate_disabled_previous:hover,
+.paginate_enabled_previous:hover,
+.paginate_disabled_next:hover,
+.paginate_enabled_next:hover {
+ text-decoration: none !important;
+}
+.paginate_disabled_previous:active,
+.paginate_enabled_previous:active,
+.paginate_disabled_next:active,
+.paginate_enabled_next:active {
+ outline: none;
+}
+
+.paginate_disabled_previous,
+.paginate_disabled_next {
+ color: #666 !important;
+}
+.paginate_disabled_previous,
+.paginate_enabled_previous {
+ padding-left: 23px;
+}
+.paginate_disabled_next,
+.paginate_enabled_next {
+ padding-right: 23px;
+ margin-left: 10px;
+}
+
+.paginate_enabled_previous { background: url('../images/back_enabled.png') no-repeat top left; }
+.paginate_enabled_previous:hover { background: url('../images/back_enabled_hover.png') no-repeat top left; }
+.paginate_disabled_previous { background: url('../images/back_disabled.png') no-repeat top left; }
+
+.paginate_enabled_next { background: url('../images/forward_enabled.png') no-repeat top right; }
+.paginate_enabled_next:hover { background: url('../images/forward_enabled_hover.png') no-repeat top right; }
+.paginate_disabled_next { background: url('../images/forward_disabled.png') no-repeat top right; }
+
+/* Full number pagination */
+.paging_full_numbers a:active {
+ outline: none;
+}
+.paging_full_numbers a:hover {
+ text-decoration: none;
+}
+
+.paging_full_numbers a.paginate_button,
+.paging_full_numbers a.paginate_active {
+ border: 1px solid #aaa;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+ padding: 2px 5px;
+ margin: 0 3px;
+ cursor: pointer;
+ *cursor: hand;
+ color: #333 !important;
+}
+
+.paging_full_numbers a.paginate_button {
+ background-color: #ddd;
+}
+
+.paging_full_numbers a.paginate_button:hover {
+ background-color: #ccc;
+ text-decoration: none !important;
+}
+
+.paging_full_numbers a.paginate_active {
+ background-color: #99B3FF;
+}
+
+
+/*
+ * Processing indicator
+ */
+.dataTables_processing {
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ width: 250px;
+ height: 30px;
+ margin-left: -125px;
+ margin-top: -15px;
+ padding: 14px 0 2px 0;
+ border: 1px solid #ddd;
+ text-align: center;
+ color: #999;
+ font-size: 14px;
+ background-color: white;
+}
+
+
+/*
+ * Sorting
+ */
+table.dataTable thead th div.DataTables_sort_wrapper {
+ position: relative;
+ padding-right: 20px;
+}
+
+table.dataTable thead th div.DataTables_sort_wrapper span {
+ position: absolute;
+ top: 50%;
+ margin-top: -8px;
+ right: 0;
+}
+
+table.dataTable th:active {
+ outline: none;
+}
+
+
+/*
+ * Scrolling
+ */
+.dataTables_scroll {
+ clear: both;
+}
+
+.dataTables_scrollBody {
+ *margin-top: -1px;
+ -webkit-overflow-scrolling: touch;
+}
+
diff --git a/asset/static/css/datatables/images/Sorting icons.psd b/asset/static/css/datatables/images/Sorting icons.psd
new file mode 100755
index 0000000..53b2e06
Binary files /dev/null and b/asset/static/css/datatables/images/Sorting icons.psd differ
diff --git a/asset/static/css/datatables/images/back_disabled.png b/asset/static/css/datatables/images/back_disabled.png
new file mode 100755
index 0000000..881de79
Binary files /dev/null and b/asset/static/css/datatables/images/back_disabled.png differ
diff --git a/asset/static/css/datatables/images/back_enabled.png b/asset/static/css/datatables/images/back_enabled.png
new file mode 100755
index 0000000..c608682
Binary files /dev/null and b/asset/static/css/datatables/images/back_enabled.png differ
diff --git a/asset/static/css/datatables/images/back_enabled_hover.png b/asset/static/css/datatables/images/back_enabled_hover.png
new file mode 100755
index 0000000..d300f10
Binary files /dev/null and b/asset/static/css/datatables/images/back_enabled_hover.png differ
diff --git a/asset/static/css/datatables/images/favicon.ico b/asset/static/css/datatables/images/favicon.ico
new file mode 100755
index 0000000..6eeaa2a
Binary files /dev/null and b/asset/static/css/datatables/images/favicon.ico differ
diff --git a/asset/static/css/datatables/images/forward_disabled.png b/asset/static/css/datatables/images/forward_disabled.png
new file mode 100755
index 0000000..6a6ded7
Binary files /dev/null and b/asset/static/css/datatables/images/forward_disabled.png differ
diff --git a/asset/static/css/datatables/images/forward_enabled.png b/asset/static/css/datatables/images/forward_enabled.png
new file mode 100755
index 0000000..a4e6b53
Binary files /dev/null and b/asset/static/css/datatables/images/forward_enabled.png differ
diff --git a/asset/static/css/datatables/images/forward_enabled_hover.png b/asset/static/css/datatables/images/forward_enabled_hover.png
new file mode 100755
index 0000000..fc46c5e
Binary files /dev/null and b/asset/static/css/datatables/images/forward_enabled_hover.png differ
diff --git a/asset/static/css/datatables/images/sort_asc.png b/asset/static/css/datatables/images/sort_asc.png
new file mode 100755
index 0000000..a88d797
Binary files /dev/null and b/asset/static/css/datatables/images/sort_asc.png differ
diff --git a/asset/static/css/datatables/images/sort_asc_disabled.png b/asset/static/css/datatables/images/sort_asc_disabled.png
new file mode 100755
index 0000000..4e144cf
Binary files /dev/null and b/asset/static/css/datatables/images/sort_asc_disabled.png differ
diff --git a/asset/static/css/datatables/images/sort_both.png b/asset/static/css/datatables/images/sort_both.png
new file mode 100755
index 0000000..1867040
Binary files /dev/null and b/asset/static/css/datatables/images/sort_both.png differ
diff --git a/asset/static/css/datatables/images/sort_desc.png b/asset/static/css/datatables/images/sort_desc.png
new file mode 100755
index 0000000..def071e
Binary files /dev/null and b/asset/static/css/datatables/images/sort_desc.png differ
diff --git a/asset/static/css/datatables/images/sort_desc_disabled.png b/asset/static/css/datatables/images/sort_desc_disabled.png
new file mode 100755
index 0000000..7824973
Binary files /dev/null and b/asset/static/css/datatables/images/sort_desc_disabled.png differ
diff --git a/asset/static/css/datatables/tools/css/dataTables.tableTools.css b/asset/static/css/datatables/tools/css/dataTables.tableTools.css
new file mode 100755
index 0000000..cf38987
--- /dev/null
+++ b/asset/static/css/datatables/tools/css/dataTables.tableTools.css
@@ -0,0 +1,336 @@
+/*
+ * File: TableTools.css
+ * Description: Styles for TableTools 2
+ * Author: Allan Jardine (www.sprymedia.co.uk)
+ * Language: Javascript
+ * License: GPL v2 / 3 point BSD
+ * Project: DataTables
+ *
+ * Copyright 2009-2012 Allan Jardine, all rights reserved.
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * CSS name space:
+ * DTTT DataTables TableTools
+ *
+ * Style sheet provides:
+ * CONTAINER TableTools container element and styles applying to all components
+ * BUTTON_STYLES Action specific button styles
+ * SELECTING Row selection styles
+ * COLLECTIONS Drop down list (collection) styles
+ * PRINTING Print display styles
+ */
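+
+/* A minimal override sketch (illustrative, not part of TableTools): site
+ * styles can hook the DTTT name space above without editing this file, e.g.
+ *
+ * div.DTTT_container { float: left; }
+ * a.DTTT_button { font-size: 1em; }
+ */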
+
+
+/*
+ * CONTAINER
+ * TableTools container element and styles applying to all components
+ */
+div.DTTT_container {
+ position: relative;
+ float: right;
+ margin-bottom: 1em;
+}
+
+@media screen and (max-width: 640px) {
+ div.DTTT_container {
+ float: none !important;
+ text-align: center;
+ }
+
+ div.DTTT_container:after {
+ visibility: hidden;
+ display: block;
+ content: "";
+ clear: both;
+ height: 0;
+ }
+}
+
+
+button.DTTT_button,
+div.DTTT_button,
+a.DTTT_button {
+ position: relative;
+ display: inline-block;
+ margin-right: 3px;
+ padding: 5px 8px;
+ border: 1px solid #999;
+ cursor: pointer;
+ *cursor: hand;
+ font-size: 0.88em;
+
+ -webkit-border-radius: 2px;
+ -moz-border-radius: 2px;
+ -ms-border-radius: 2px;
+ -o-border-radius: 2px;
+ border-radius: 2px;
+
+ -webkit-box-shadow: 1px 1px 3px #ccc;
+ -moz-box-shadow: 1px 1px 3px #ccc;
+ -ms-box-shadow: 1px 1px 3px #ccc;
+ -o-box-shadow: 1px 1px 3px #ccc;
+ box-shadow: 1px 1px 3px #ccc;
+
+ /* Generated by http://www.colorzilla.com/gradient-editor/ */
+ background: #ffffff; /* Old browsers */
+ background: -webkit-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* Chrome10+,Safari5.1+ */
+ background: -moz-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* FF3.6+ */
+ background: -ms-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* IE10+ */
+ background: -o-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* Opera 11.10+ */
+ background: linear-gradient(to bottom, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* W3C */
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#ffffff', endColorstr='#f9f9f9',GradientType=0 ); /* IE6-9 */
+}
+
+
+/* Button elements effectively get border-box sizing; we can't just rely on that for A and DIV elements due to IE6/7 */
+button.DTTT_button {
+ height: 30px;
+ padding: 3px 8px;
+}
+
+.DTTT_button embed {
+ outline: none;
+}
+
+button.DTTT_button:hover,
+div.DTTT_button:hover,
+a.DTTT_button:hover {
+ border: 1px solid #666;
+ text-decoration: none !important;
+
+ -webkit-box-shadow: 1px 1px 3px #999;
+ -moz-box-shadow: 1px 1px 3px #999;
+ -ms-box-shadow: 1px 1px 3px #999;
+ -o-box-shadow: 1px 1px 3px #999;
+ box-shadow: 1px 1px 3px #999;
+
+ background: #f3f3f3; /* Old browsers */
+ background: -webkit-linear-gradient(top, #f3f3f3 0%,#e2e2e2 89%,#f4f4f4 100%); /* Chrome10+,Safari5.1+ */
+ background: -moz-linear-gradient(top, #f3f3f3 0%,#e2e2e2 89%,#f4f4f4 100%); /* FF3.6+ */
+ background: -ms-linear-gradient(top, #f3f3f3 0%,#e2e2e2 89%,#f4f4f4 100%); /* IE10+ */
+ background: -o-linear-gradient(top, #f3f3f3 0%,#e2e2e2 89%,#f4f4f4 100%); /* Opera 11.10+ */
+ background: linear-gradient(to bottom, #f3f3f3 0%,#e2e2e2 89%,#f4f4f4 100%); /* W3C */
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#f3f3f3', endColorstr='#f4f4f4',GradientType=0 ); /* IE6-9 */
+}
+
+button.DTTT_disabled,
+div.DTTT_disabled,
+a.DTTT_disabled {
+ color: #999;
+ border: 1px solid #d0d0d0;
+
+ background: #ffffff; /* Old browsers */
+ background: -webkit-linear-gradient(top, #ffffff 0%,#f9f9f9 89%,#fafafa 100%); /* Chrome10+,Safari5.1+ */
+ background: -moz-linear-gradient(top, #ffffff 0%,#f9f9f9 89%,#fafafa 100%); /* FF3.6+ */
+ background: -ms-linear-gradient(top, #ffffff 0%,#f9f9f9 89%,#fafafa 100%); /* IE10+ */
+ background: -o-linear-gradient(top, #ffffff 0%,#f9f9f9 89%,#fafafa 100%); /* Opera 11.10+ */
+ background: linear-gradient(to bottom, #ffffff 0%,#f9f9f9 89%,#fafafa 100%); /* W3C */
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#ffffff', endColorstr='#fafafa',GradientType=0 ); /* IE6-9 */
+}
+
+
+
+/*
+ * BUTTON_STYLES
+ * Action specific button styles
+ * If you want images - comment this back in
+
+a.DTTT_button_csv,
+a.DTTT_button_xls,
+a.DTTT_button_copy,
+a.DTTT_button_pdf,
+a.DTTT_button_print {
+ padding-right: 0px;
+}
+
+a.DTTT_button_csv span,
+a.DTTT_button_xls span,
+a.DTTT_button_copy span,
+a.DTTT_button_pdf span,
+a.DTTT_button_print span {
+ display: inline-block;
+ height: 24px;
+ line-height: 24px;
+ padding-right: 30px;
+}
+
+
+a.DTTT_button_csv span { background: url(../images/csv.png) no-repeat bottom right; }
+a.DTTT_button_csv:hover span { background: url(../images/csv_hover.png) no-repeat center right; }
+
+a.DTTT_button_xls span { background: url(../images/xls.png) no-repeat center right; }
+a.DTTT_button_xls:hover span { background: #f0f0f0 url(../images/xls_hover.png) no-repeat center right; }
+
+a.DTTT_button_copy span { background: url(../images/copy.png) no-repeat center right; }
+a.DTTT_button_copy:hover span { background: #f0f0f0 url(../images/copy_hover.png) no-repeat center right; }
+
+a.DTTT_button_pdf span { background: url(../images/pdf.png) no-repeat center right; }
+a.DTTT_button_pdf:hover span { background: #f0f0f0 url(../images/pdf_hover.png) no-repeat center right; }
+
+a.DTTT_button_print span { background: url(../images/print.png) no-repeat center right; }
+a.DTTT_button_print:hover span { background: #f0f0f0 url(../images/print_hover.png) no-repeat center right; }
+
+ */
+
+button.DTTT_button_collection span {
+ padding-right: 17px;
+ background: url(../images/collection.png) no-repeat center right;
+}
+
+button.DTTT_button_collection:hover span {
+ padding-right: 17px;
+ background: #f0f0f0 url(../images/collection_hover.png) no-repeat center right;
+}
+
+
+/*
+ * SELECTING
+ * Row selection styles
+ */
+table.DTTT_selectable tbody tr {
+ cursor: pointer;
+ *cursor: hand;
+}
+
+table.dataTable tr.DTTT_selected.odd {
+ background-color: #9FAFD1;
+}
+
+table.dataTable tr.DTTT_selected.odd td.sorting_1 {
+ background-color: #9FAFD1;
+}
+
+table.dataTable tr.DTTT_selected.odd td.sorting_2 {
+ background-color: #9FAFD1;
+}
+
+table.dataTable tr.DTTT_selected.odd td.sorting_3 {
+ background-color: #9FAFD1;
+}
+
+
+table.dataTable tr.DTTT_selected.even {
+ background-color: #B0BED9;
+}
+
+table.dataTable tr.DTTT_selected.even td.sorting_1 {
+ background-color: #B0BED9;
+}
+
+table.dataTable tr.DTTT_selected.even td.sorting_2 {
+ background-color: #B0BED9;
+}
+
+table.dataTable tr.DTTT_selected.even td.sorting_3 {
+ background-color: #B0BED9;
+}
+
+
+/*
+ * COLLECTIONS
+ * Drop down list (collection) styles
+ */
+
+div.DTTT_collection {
+ width: 150px;
+ padding: 8px 8px 4px 8px;
+ border: 1px solid #ccc;
+ border: 1px solid rgba( 0, 0, 0, 0.4 );
+ background-color: #f3f3f3;
+ background-color: rgba( 255, 255, 255, 0.3 );
+ overflow: hidden;
+ z-index: 2002;
+
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ -ms-border-radius: 5px;
+ -o-border-radius: 5px;
+ border-radius: 5px;
+
+ -webkit-box-shadow: 3px 3px 5px rgba(0, 0, 0, 0.3);
+ -moz-box-shadow: 3px 3px 5px rgba(0, 0, 0, 0.3);
+ -ms-box-shadow: 3px 3px 5px rgba(0, 0, 0, 0.3);
+ -o-box-shadow: 3px 3px 5px rgba(0, 0, 0, 0.3);
+ box-shadow: 3px 3px 5px rgba(0, 0, 0, 0.3);
+}
+
+div.DTTT_collection_background {
+ background: transparent url(../images/background.png) repeat top left;
+ z-index: 2001;
+}
+
+div.DTTT_collection button.DTTT_button,
+div.DTTT_collection div.DTTT_button,
+div.DTTT_collection a.DTTT_button {
+ position: relative;
+ left: 0;
+ right: 0;
+
+ display: block;
+ float: none;
+ margin-bottom: 4px;
+
+ -webkit-box-shadow: 1px 1px 3px #999;
+ -moz-box-shadow: 1px 1px 3px #999;
+ -ms-box-shadow: 1px 1px 3px #999;
+ -o-box-shadow: 1px 1px 3px #999;
+ box-shadow: 1px 1px 3px #999;
+}
+
+
+/*
+ * PRINTING
+ * Print display styles
+ */
+
+.DTTT_print_info {
+ position: fixed;
+ top: 50%;
+ left: 50%;
+ width: 400px;
+ height: 150px;
+ margin-left: -200px;
+ margin-top: -75px;
+ text-align: center;
+ color: #333;
+ padding: 10px 30px;
+
+ background: #ffffff; /* Old browsers */
+ background: -webkit-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* Chrome10+,Safari5.1+ */
+ background: -moz-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* FF3.6+ */
+ background: -ms-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* IE10+ */
+ background: -o-linear-gradient(top, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* Opera 11.10+ */
+ background: linear-gradient(to bottom, #ffffff 0%,#f3f3f3 89%,#f9f9f9 100%); /* W3C */
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#ffffff', endColorstr='#f9f9f9',GradientType=0 ); /* IE6-9 */
+
+ opacity: 0.95;
+
+ border: 1px solid black;
+ border: 1px solid rgba(0, 0, 0, 0.5);
+
+ -webkit-border-radius: 6px;
+ -moz-border-radius: 6px;
+ -ms-border-radius: 6px;
+ -o-border-radius: 6px;
+ border-radius: 6px;
+
+ -webkit-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.5);
+ -moz-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.5);
+ -ms-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.5);
+ -o-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.5);
+ box-shadow: 0 3px 7px rgba(0, 0, 0, 0.5);
+}
+
+.DTTT_print_info h6 {
+ font-weight: normal;
+ font-size: 28px;
+ line-height: 28px;
+ margin: 1em;
+}
+
+.DTTT_print_info p {
+ font-size: 14px;
+ line-height: 20px;
+}
+
diff --git a/asset/static/css/editor/external/google-code-prettify/prettify.css b/asset/static/css/editor/external/google-code-prettify/prettify.css
new file mode 100755
index 0000000..d44b3a2
--- /dev/null
+++ b/asset/static/css/editor/external/google-code-prettify/prettify.css
@@ -0,0 +1 @@
+.pln{color:#000}@media screen{.str{color:#080}.kwd{color:#008}.com{color:#800}.typ{color:#606}.lit{color:#066}.pun,.opn,.clo{color:#660}.tag{color:#008}.atn{color:#606}.atv{color:#080}.dec,.var{color:#606}.fun{color:red}}@media print,projection{.str{color:#060}.kwd{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{color:#404;font-weight:bold}.lit{color:#044}.pun,.opn,.clo{color:#440}.tag{color:#006;font-weight:bold}.atn{color:#404}.atv{color:#060}}pre.prettyprint{padding:2px;border:1px solid #888}ol.linenums{margin-top:0;margin-bottom:0}li.L0,li.L1,li.L2,li.L3,li.L5,li.L6,li.L7,li.L8{list-style-type:none}li.L1,li.L3,li.L5,li.L7,li.L9{background:#eee}
\ No newline at end of file
diff --git a/asset/static/css/editor/index.css b/asset/static/css/editor/index.css
new file mode 100755
index 0000000..7281167
--- /dev/null
+++ b/asset/static/css/editor/index.css
@@ -0,0 +1,42 @@
+/* This CSS is not part of the widget; it is here only as an example of the demo page styling. Don't copy this one, roll your own.
+ * One of the key things about the widget is that it allows you to do your own styling!
+ */
+
+#editor {
+ max-height: 250px;
+ height: 250px;
+ background-color: white;
+ border-collapse: separate;
+ border: 1px solid rgb(204, 204, 204);
+ padding: 4px;
+ box-sizing: content-box;
+ -webkit-box-shadow: rgba(0, 0, 0, 0.0745098) 0px 1px 1px 0px inset;
+ box-shadow: rgba(0, 0, 0, 0.0745098) 0px 1px 1px 0px inset;
+ border-top-right-radius: 3px; border-bottom-right-radius: 3px;
+ border-bottom-left-radius: 3px; border-top-left-radius: 3px;
+ overflow: scroll;
+ outline: none;
+}
+#voiceBtn {
+ width: 20px;
+ color: transparent;
+ background-color: transparent;
+ transform: scale(2.0, 2.0);
+ -webkit-transform: scale(2.0, 2.0);
+ -moz-transform: scale(2.0, 2.0);
+ border: transparent;
+ cursor: pointer;
+ box-shadow: none;
+ -webkit-box-shadow: none;
+}
+
+div[data-role="editor-toolbar"] {
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+}
+
+.dropdown-menu a {
+ cursor: pointer;
+}
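
The #editor rule above sizes and decorates a plain contenteditable region, and the "widget" the comment refers to appears to be a bootstrap-wysiwyg style editor, which takes its commands from the data-role="editor-toolbar" container styled above. A sketch of the expected markup, assuming jQuery and the bootstrap-wysiwyg plugin (the data-edit attributes follow that plugin's convention; treat the exact markup as illustrative):

    <div class="btn-toolbar" data-role="editor-toolbar">
      <a class="btn" data-edit="bold">B</a>
      <a class="btn" data-edit="italic">I</a>
    </div>
    <div id="editor" contenteditable="true">Type here</div>
    <script>
      // the plugin wires the toolbar's data-edit commands to the #editor region
      $('#editor').wysiwyg();
    </script>
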
diff --git a/asset/static/css/floatexamples.css b/asset/static/css/floatexamples.css
new file mode 100755
index 0000000..82fd44c
--- /dev/null
+++ b/asset/static/css/floatexamples.css
@@ -0,0 +1,22 @@
+
+.demo-container {
+ box-sizing: border-box;
+ width: 100%;
+ height: 350px; display: block; margin-bottom: 10px;
+}
+
+.demo-placeholder {
+ width: 100%;
+ height: 100%;
+ font-size: 14px;
+ line-height: 1.2em;
+}
+
+.legend table {
+ border-spacing: 5px;
+}
+.pieLabel {
+ border: 1px solid #CAC9C9;
+ border-radius: 4px;
+ padding: 1px 1px;
+}
\ No newline at end of file
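
The .demo-container/.demo-placeholder pair matches the markup Flot's examples use: the container fixes the height and the placeholder fills it, which matters because Flot refuses to draw into a zero-sized element. A sketch, assuming jQuery and jquery.flot.js are loaded:

    <div class="demo-container">
      <div id="chart" class="demo-placeholder"></div>
    </div>
    <script>
      // Flot measures the placeholder, so the 350px height above is what
      // actually sizes the chart canvas
      $.plot($('#chart'), [ [[0, 1], [1, 3], [2, 2]] ]);
    </script>
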
diff --git a/asset/static/css/icheck/flat/_all.css b/asset/static/css/icheck/flat/_all.css
new file mode 100755
index 0000000..e9d0ceb
--- /dev/null
+++ b/asset/static/css/icheck/flat/_all.css
@@ -0,0 +1,530 @@
+/* iCheck plugin Flat skin
+----------------------------------- */
+.icheckbox_flat,
+.iradio_flat {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(flat.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat {
+ background-position: 0 0;
+}
+ .icheckbox_flat.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat {
+ background-position: -88px 0;
+}
+ .iradio_flat.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat,
+ .iradio_flat {
+ background-image: url(flat@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* red */
+.icheckbox_flat-red,
+.iradio_flat-red {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(red.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-red {
+ background-position: 0 0;
+}
+ .icheckbox_flat-red.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-red.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-red.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-red {
+ background-position: -88px 0;
+}
+ .iradio_flat-red.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-red.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-red.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-red,
+ .iradio_flat-red {
+ background-image: url(red@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* green */
+.icheckbox_flat-green,
+.iradio_flat-green {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(green.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-green {
+ background-position: 0 0;
+}
+ .icheckbox_flat-green.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-green.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-green.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-green {
+ background-position: -88px 0;
+}
+ .iradio_flat-green.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-green.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-green.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-green,
+ .iradio_flat-green {
+ background-image: url(green@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* blue */
+.icheckbox_flat-blue,
+.iradio_flat-blue {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(blue.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-blue {
+ background-position: 0 0;
+}
+ .icheckbox_flat-blue.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-blue.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-blue.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-blue {
+ background-position: -88px 0;
+}
+ .iradio_flat-blue.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-blue.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-blue.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-blue,
+ .iradio_flat-blue {
+ background-image: url(blue@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* aero */
+.icheckbox_flat-aero,
+.iradio_flat-aero {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(aero.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-aero {
+ background-position: 0 0;
+}
+ .icheckbox_flat-aero.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-aero.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-aero.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-aero {
+ background-position: -88px 0;
+}
+ .iradio_flat-aero.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-aero.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-aero.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-aero,
+ .iradio_flat-aero {
+ background-image: url(aero@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* grey */
+.icheckbox_flat-grey,
+.iradio_flat-grey {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(grey.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-grey {
+ background-position: 0 0;
+}
+ .icheckbox_flat-grey.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-grey.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-grey.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-grey {
+ background-position: -88px 0;
+}
+ .iradio_flat-grey.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-grey.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-grey.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-grey,
+ .iradio_flat-grey {
+ background-image: url(grey@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* orange */
+.icheckbox_flat-orange,
+.iradio_flat-orange {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(orange.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-orange {
+ background-position: 0 0;
+}
+ .icheckbox_flat-orange.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-orange.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-orange.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-orange {
+ background-position: -88px 0;
+}
+ .iradio_flat-orange.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-orange.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-orange.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-orange,
+ .iradio_flat-orange {
+ background-image: url(orange@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* yellow */
+.icheckbox_flat-yellow,
+.iradio_flat-yellow {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(yellow.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-yellow {
+ background-position: 0 0;
+}
+ .icheckbox_flat-yellow.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-yellow.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-yellow.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-yellow {
+ background-position: -88px 0;
+}
+ .iradio_flat-yellow.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-yellow.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-yellow.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-yellow,
+ .iradio_flat-yellow {
+ background-image: url(yellow@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* pink */
+.icheckbox_flat-pink,
+.iradio_flat-pink {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(pink.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-pink {
+ background-position: 0 0;
+}
+ .icheckbox_flat-pink.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-pink.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-pink.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-pink {
+ background-position: -88px 0;
+}
+ .iradio_flat-pink.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-pink.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-pink.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-pink,
+ .iradio_flat-pink {
+ background-image: url(pink@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
+
+/* purple */
+.icheckbox_flat-purple,
+.iradio_flat-purple {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(purple.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-purple {
+ background-position: 0 0;
+}
+ .icheckbox_flat-purple.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-purple.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-purple.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-purple {
+ background-position: -88px 0;
+}
+ .iradio_flat-purple.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-purple.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-purple.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-purple,
+ .iradio_flat-purple {
+ background-image: url(purple@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
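
Every skin block above repeats one sprite scheme: a 176x22 image holding eight 20x20 states side by side at 22px steps, so checked sits at -22px, disabled at -44px, checked+disabled at -66px, and the radio variants start at -88px; the media query swaps in an @2x sprite scaled back down for HiDPI screens. A usage sketch with the stock iCheck API (jQuery and icheck.min.js assumed):

    <input type="checkbox" checked>
    <input type="radio" name="color">
    <script>
      // iCheck hides the native inputs and inserts styled divs that receive
      // the .checked/.disabled classes the sprite rules above key off
      $('input').iCheck({
        checkboxClass: 'icheckbox_flat-green',
        radioClass: 'iradio_flat-green'
      });
    </script>
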
diff --git a/asset/static/css/icheck/flat/aero.css b/asset/static/css/icheck/flat/aero.css
new file mode 100755
index 0000000..71cbca9
--- /dev/null
+++ b/asset/static/css/icheck/flat/aero.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, aero
+----------------------------------- */
+.icheckbox_flat-aero,
+.iradio_flat-aero {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(aero.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-aero {
+ background-position: 0 0;
+}
+ .icheckbox_flat-aero.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-aero.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-aero.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-aero {
+ background-position: -88px 0;
+}
+ .iradio_flat-aero.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-aero.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-aero.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-aero,
+ .iradio_flat-aero {
+ background-image: url(aero@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/aero.png b/asset/static/css/icheck/flat/aero.png
new file mode 100755
index 0000000..f4277aa
Binary files /dev/null and b/asset/static/css/icheck/flat/aero.png differ
diff --git a/asset/static/css/icheck/flat/aero@2x.png b/asset/static/css/icheck/flat/aero@2x.png
new file mode 100755
index 0000000..a9a7494
Binary files /dev/null and b/asset/static/css/icheck/flat/aero@2x.png differ
diff --git a/asset/static/css/icheck/flat/blue.css b/asset/static/css/icheck/flat/blue.css
new file mode 100755
index 0000000..56a7830
--- /dev/null
+++ b/asset/static/css/icheck/flat/blue.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, blue
+----------------------------------- */
+.icheckbox_flat-blue,
+.iradio_flat-blue {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(blue.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-blue {
+ background-position: 0 0;
+}
+ .icheckbox_flat-blue.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-blue.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-blue.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-blue {
+ background-position: -88px 0;
+}
+ .iradio_flat-blue.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-blue.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-blue.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-blue,
+ .iradio_flat-blue {
+ background-image: url(blue@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/blue.png b/asset/static/css/icheck/flat/blue.png
new file mode 100755
index 0000000..4b6ef98
Binary files /dev/null and b/asset/static/css/icheck/flat/blue.png differ
diff --git a/asset/static/css/icheck/flat/blue@2x.png b/asset/static/css/icheck/flat/blue@2x.png
new file mode 100755
index 0000000..d52da05
Binary files /dev/null and b/asset/static/css/icheck/flat/blue@2x.png differ
diff --git a/asset/static/css/icheck/flat/flat.css b/asset/static/css/icheck/flat/flat.css
new file mode 100755
index 0000000..0f39690
--- /dev/null
+++ b/asset/static/css/icheck/flat/flat.css
@@ -0,0 +1,53 @@
+/* iCheck plugin flat skin, black
+----------------------------------- */
+.icheckbox_flat,
+.iradio_flat {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(flat.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat {
+ background-position: 0 0;
+}
+ .icheckbox_flat.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat {
+ background-position: -88px 0;
+}
+ .iradio_flat.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat,
+ .iradio_flat {
+ background-image: url(flat@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/flat.png b/asset/static/css/icheck/flat/flat.png
new file mode 100755
index 0000000..15af826
Binary files /dev/null and b/asset/static/css/icheck/flat/flat.png differ
diff --git a/asset/static/css/icheck/flat/flat@2x.png b/asset/static/css/icheck/flat/flat@2x.png
new file mode 100755
index 0000000..e70e438
Binary files /dev/null and b/asset/static/css/icheck/flat/flat@2x.png differ
diff --git a/asset/static/css/icheck/flat/green.css b/asset/static/css/icheck/flat/green.css
new file mode 100755
index 0000000..b80e04d
--- /dev/null
+++ b/asset/static/css/icheck/flat/green.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, green
+----------------------------------- */
+.icheckbox_flat-green,
+.iradio_flat-green {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(green.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-green {
+ background-position: 0 0;
+}
+ .icheckbox_flat-green.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-green.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-green.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-green {
+ background-position: -88px 0;
+}
+ .iradio_flat-green.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-green.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-green.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-green,
+ .iradio_flat-green {
+ background-image: url(green@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/green.png b/asset/static/css/icheck/flat/green.png
new file mode 100755
index 0000000..6b303fb
Binary files /dev/null and b/asset/static/css/icheck/flat/green.png differ
diff --git a/asset/static/css/icheck/flat/green@2x.png b/asset/static/css/icheck/flat/green@2x.png
new file mode 100755
index 0000000..92b4411
Binary files /dev/null and b/asset/static/css/icheck/flat/green@2x.png differ
diff --git a/asset/static/css/icheck/flat/grey.css b/asset/static/css/icheck/flat/grey.css
new file mode 100755
index 0000000..96e62e8
--- /dev/null
+++ b/asset/static/css/icheck/flat/grey.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, grey
+----------------------------------- */
+.icheckbox_flat-grey,
+.iradio_flat-grey {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(grey.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-grey {
+ background-position: 0 0;
+}
+ .icheckbox_flat-grey.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-grey.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-grey.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-grey {
+ background-position: -88px 0;
+}
+ .iradio_flat-grey.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-grey.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-grey.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-grey,
+ .iradio_flat-grey {
+ background-image: url(grey@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/grey.png b/asset/static/css/icheck/flat/grey.png
new file mode 100755
index 0000000..c6e2873
Binary files /dev/null and b/asset/static/css/icheck/flat/grey.png differ
diff --git a/asset/static/css/icheck/flat/grey@2x.png b/asset/static/css/icheck/flat/grey@2x.png
new file mode 100755
index 0000000..0b47b1c
Binary files /dev/null and b/asset/static/css/icheck/flat/grey@2x.png differ
diff --git a/asset/static/css/icheck/flat/orange.css b/asset/static/css/icheck/flat/orange.css
new file mode 100755
index 0000000..f9c873f
--- /dev/null
+++ b/asset/static/css/icheck/flat/orange.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, orange
+----------------------------------- */
+.icheckbox_flat-orange,
+.iradio_flat-orange {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(orange.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-orange {
+ background-position: 0 0;
+}
+ .icheckbox_flat-orange.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-orange.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-orange.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-orange {
+ background-position: -88px 0;
+}
+ .iradio_flat-orange.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-orange.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-orange.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-orange,
+ .iradio_flat-orange {
+ background-image: url(orange@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/orange.png b/asset/static/css/icheck/flat/orange.png
new file mode 100755
index 0000000..ec2532e
Binary files /dev/null and b/asset/static/css/icheck/flat/orange.png differ
diff --git a/asset/static/css/icheck/flat/orange@2x.png b/asset/static/css/icheck/flat/orange@2x.png
new file mode 100755
index 0000000..9350b50
Binary files /dev/null and b/asset/static/css/icheck/flat/orange@2x.png differ
diff --git a/asset/static/css/icheck/flat/pink.css b/asset/static/css/icheck/flat/pink.css
new file mode 100755
index 0000000..179f980
--- /dev/null
+++ b/asset/static/css/icheck/flat/pink.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, pink
+----------------------------------- */
+.icheckbox_flat-pink,
+.iradio_flat-pink {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(pink.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-pink {
+ background-position: 0 0;
+}
+ .icheckbox_flat-pink.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-pink.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-pink.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-pink {
+ background-position: -88px 0;
+}
+ .iradio_flat-pink.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-pink.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-pink.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-pink,
+ .iradio_flat-pink {
+ background-image: url(pink@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/pink.png b/asset/static/css/icheck/flat/pink.png
new file mode 100755
index 0000000..3e65d9d
Binary files /dev/null and b/asset/static/css/icheck/flat/pink.png differ
diff --git a/asset/static/css/icheck/flat/pink@2x.png b/asset/static/css/icheck/flat/pink@2x.png
new file mode 100755
index 0000000..281ba06
Binary files /dev/null and b/asset/static/css/icheck/flat/pink@2x.png differ
diff --git a/asset/static/css/icheck/flat/purple.css b/asset/static/css/icheck/flat/purple.css
new file mode 100755
index 0000000..dfedafc
--- /dev/null
+++ b/asset/static/css/icheck/flat/purple.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, purple
+----------------------------------- */
+.icheckbox_flat-purple,
+.iradio_flat-purple {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(purple.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-purple {
+ background-position: 0 0;
+}
+ .icheckbox_flat-purple.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-purple.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-purple.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-purple {
+ background-position: -88px 0;
+}
+ .iradio_flat-purple.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-purple.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-purple.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-purple,
+ .iradio_flat-purple {
+ background-image: url(purple@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/purple.png b/asset/static/css/icheck/flat/purple.png
new file mode 100755
index 0000000..3699fd5
Binary files /dev/null and b/asset/static/css/icheck/flat/purple.png differ
diff --git a/asset/static/css/icheck/flat/purple@2x.png b/asset/static/css/icheck/flat/purple@2x.png
new file mode 100755
index 0000000..7f4be74
Binary files /dev/null and b/asset/static/css/icheck/flat/purple@2x.png differ
diff --git a/asset/static/css/icheck/flat/red.css b/asset/static/css/icheck/flat/red.css
new file mode 100755
index 0000000..83ec91e
--- /dev/null
+++ b/asset/static/css/icheck/flat/red.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, red
+----------------------------------- */
+.icheckbox_flat-red,
+.iradio_flat-red {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(red.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-red {
+ background-position: 0 0;
+}
+ .icheckbox_flat-red.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-red.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-red.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-red {
+ background-position: -88px 0;
+}
+ .iradio_flat-red.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-red.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-red.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-red,
+ .iradio_flat-red {
+ background-image: url(red@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/red.png b/asset/static/css/icheck/flat/red.png
new file mode 100755
index 0000000..0d5ac38
Binary files /dev/null and b/asset/static/css/icheck/flat/red.png differ
diff --git a/asset/static/css/icheck/flat/red@2x.png b/asset/static/css/icheck/flat/red@2x.png
new file mode 100755
index 0000000..38590d9
Binary files /dev/null and b/asset/static/css/icheck/flat/red@2x.png differ
diff --git a/asset/static/css/icheck/flat/yellow.css b/asset/static/css/icheck/flat/yellow.css
new file mode 100755
index 0000000..7bb6039
--- /dev/null
+++ b/asset/static/css/icheck/flat/yellow.css
@@ -0,0 +1,53 @@
+/* iCheck plugin Flat skin, yellow
+----------------------------------- */
+.icheckbox_flat-yellow,
+.iradio_flat-yellow {
+ display: inline-block;
+ *display: inline;
+ vertical-align: middle;
+ margin: 0;
+ padding: 0;
+ width: 20px;
+ height: 20px;
+ background: url(yellow.png) no-repeat;
+ border: none;
+ cursor: pointer;
+}
+
+.icheckbox_flat-yellow {
+ background-position: 0 0;
+}
+ .icheckbox_flat-yellow.checked {
+ background-position: -22px 0;
+ }
+ .icheckbox_flat-yellow.disabled {
+ background-position: -44px 0;
+ cursor: default;
+ }
+ .icheckbox_flat-yellow.checked.disabled {
+ background-position: -66px 0;
+ }
+
+.iradio_flat-yellow {
+ background-position: -88px 0;
+}
+ .iradio_flat-yellow.checked {
+ background-position: -110px 0;
+ }
+ .iradio_flat-yellow.disabled {
+ background-position: -132px 0;
+ cursor: default;
+ }
+ .iradio_flat-yellow.checked.disabled {
+ background-position: -154px 0;
+ }
+
+/* HiDPI support */
+@media (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {
+ .icheckbox_flat-yellow,
+ .iradio_flat-yellow {
+ background-image: url(yellow@2x.png);
+ -webkit-background-size: 176px 22px;
+ background-size: 176px 22px;
+ }
+}
\ No newline at end of file
diff --git a/asset/static/css/icheck/flat/yellow.png b/asset/static/css/icheck/flat/yellow.png
new file mode 100755
index 0000000..909dadc
Binary files /dev/null and b/asset/static/css/icheck/flat/yellow.png differ
diff --git a/asset/static/css/icheck/flat/yellow@2x.png b/asset/static/css/icheck/flat/yellow@2x.png
new file mode 100755
index 0000000..9fd5d73
Binary files /dev/null and b/asset/static/css/icheck/flat/yellow@2x.png differ
diff --git a/asset/static/css/ion.rangeSlider.css b/asset/static/css/ion.rangeSlider.css
new file mode 100755
index 0000000..2b2b328
--- /dev/null
+++ b/asset/static/css/ion.rangeSlider.css
@@ -0,0 +1,146 @@
+/* Ion.RangeSlider
+// css version 2.0.3
+// © 2013-2014 Denis Ineshin | IonDen.com
+// ===================================================================================================================*/
+
+/* =====================================================================================================================
+// RangeSlider */
+
+.irs {
+ position: relative; display: block;
+ -webkit-touch-callout: none;
+ -webkit-user-select: none;
+ -khtml-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+}
+ .irs-line {
+ position: relative; display: block;
+ overflow: hidden;
+ outline: none !important;
+ }
+ .irs-line-left, .irs-line-mid, .irs-line-right {
+ position: absolute; display: block;
+ top: 0;
+ }
+ .irs-line-left {
+ left: 0; width: 11%;
+ }
+ .irs-line-mid {
+ left: 9%; width: 82%;
+ }
+ .irs-line-right {
+ right: 0; width: 11%;
+ }
+
+ .irs-bar {
+ position: absolute; display: block;
+ left: 0; width: 0;
+ }
+ .irs-bar-edge {
+ position: absolute; display: block;
+ top: 0; left: 0;
+ }
+
+ .irs-shadow {
+ position: absolute; display: none;
+ left: 0; width: 0;
+ }
+
+ .irs-slider {
+ position: absolute; display: block;
+ cursor: default;
+ z-index: 1;
+ }
+ .irs-slider.single {
+
+ }
+ .irs-slider.from {
+
+ }
+ .irs-slider.to {
+
+ }
+ .irs-slider.type_last {
+ z-index: 2;
+ }
+
+ .irs-min {
+ position: absolute; display: block;
+ left: 0;
+ cursor: default;
+ }
+ .irs-max {
+ position: absolute; display: block;
+ right: 0;
+ cursor: default;
+ }
+
+ .irs-from, .irs-to, .irs-single {
+ position: absolute; display: block;
+ top: 0; left: 0;
+ cursor: default;
+ white-space: nowrap;
+ }
+
+.irs-grid {
+ position: absolute; display: none;
+ bottom: 0; left: 0;
+ width: 100%; height: 20px;
+}
+.irs-with-grid .irs-grid {
+ display: block;
+}
+ .irs-grid-pol {
+ position: absolute;
+ top: 0; left: 0;
+ width: 1px; height: 8px;
+ background: #000;
+ }
+ .irs-grid-pol.small {
+ height: 4px;
+ }
+ .irs-grid-text {
+ position: absolute;
+ bottom: 0; left: 0;
+ white-space: nowrap;
+ text-align: center;
+ font-size: 9px; line-height: 9px;
+ padding: 0 3px;
+ color: #000;
+ }
+
+.irs-disable-mask {
+ position: absolute; display: block;
+ top: 0; left: -1%;
+ width: 102%; height: 100%;
+ cursor: default;
+ background: rgba(0,0,0,0.0);
+ z-index: 2;
+}
+.irs-disabled {
+ opacity: 0.4;
+}
+.lt-ie9 .irs-disabled {
+ filter: alpha(opacity=40);
+}
+
+
+.irs-hidden-input {
+ position: absolute !important;
+ display: block !important;
+ top: 0 !important;
+ left: 0 !important;
+ width: 0 !important;
+ height: 0 !important;
+ font-size: 0 !important;
+ line-height: 0 !important;
+ padding: 0 !important;
+ margin: 0 !important;
+ outline: none !important;
+ z-index: -9999 !important;
+ background: none !important;
+ border-style: solid !important;
+ border-color: transparent !important;
+}
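
This base sheet only lays the slider's parts out; all colors and imagery come from a skin file such as the Flat skin added next. An initialization sketch using the plugin's stock API (jQuery and ion.rangeSlider.min.js assumed):

    <input type="text" id="range">
    <script>
      // the plugin hides the input (see .irs-hidden-input above) and renders
      // the .irs / .irs-bar / .irs-slider structure these rules position
      $('#range').ionRangeSlider({ type: 'double', min: 0, max: 100, from: 20, to: 80 });
    </script>
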
diff --git a/asset/static/css/ion.rangeSlider.skinFlat.css b/asset/static/css/ion.rangeSlider.skinFlat.css
new file mode 100755
index 0000000..793fb75
--- /dev/null
+++ b/asset/static/css/ion.rangeSlider.skinFlat.css
@@ -0,0 +1,106 @@
+/* Ion.RangeSlider, Flat UI Skin
+// css version 2.0.3
+// © Denis Ineshin, 2014 https://github.com/IonDen
+// ===================================================================================================================*/
+
+/* =====================================================================================================================
+// Skin details */
+
+.irs-line-mid,
+.irs-line-left,
+.irs-line-right,
+.irs-bar,
+.irs-bar-edge,
+.irs-slider {
+ background: url(../images/sprite-skin-flat.png) repeat-x;
+}
+
+.irs {
+ height: 40px;
+}
+.irs-with-grid {
+ height: 60px;
+}
+.irs-line {
+ height: 12px; top: 25px;
+}
+ .irs-line-left {
+ height: 12px;
+ background-position: 0 -30px;
+ }
+ .irs-line-mid {
+ height: 12px;
+ background-position: 0 0;
+ }
+ .irs-line-right {
+ height: 12px;
+ background-position: 100% -30px;
+ }
+
+.irs-bar {
+ height: 12px; top: 25px;
+ background-position: 0 -60px;
+}
+ .irs-bar-edge {
+ top: 25px;
+ height: 12px; width: 9px;
+ background-position: 0 -90px;
+ }
+
+.irs-shadow {
+ height: 3px; top: 34px;
+ background: #000;
+ opacity: 0.25;
+}
+.lt-ie9 .irs-shadow {
+ filter: alpha(opacity=25);
+}
+
+.irs-slider {
+ width: 16px; height: 18px;
+ top: 22px;
+ background-position: 0 -120px;
+}
+.irs-slider.state_hover, .irs-slider:hover {
+ background-position: 0 -150px;
+}
+
+.irs-min, .irs-max {
+ color: #999;
+ font-size: 10px; line-height: 1.333;
+ text-shadow: none;
+ top: 0; padding: 1px 3px;
+ background: #e1e4e9;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+}
+
+.irs-from, .irs-to, .irs-single {
+ color: #fff;
+ font-size: 10px; line-height: 1.333;
+ text-shadow: none;
+ padding: 1px 5px;
+ background: #ed5565;
+ -moz-border-radius: 4px;
+ border-radius: 4px;
+}
+.irs-from:after, .irs-to:after, .irs-single:after {
+ position: absolute; display: block; content: "";
+ bottom: -6px; left: 50%;
+ width: 0; height: 0;
+ margin-left: -3px;
+ overflow: hidden;
+ border: 3px solid transparent;
+ border-top-color: #ed5565;
+}
+
+
+.irs-grid-pol {
+ background: #e1e4e9;
+}
+.irs-grid-text {
+ color: #999;
+}
+
+.irs-disabled {
+}
diff --git a/asset/static/css/maps/jquery-jvectormap-2.0.3.css b/asset/static/css/maps/jquery-jvectormap-2.0.3.css
new file mode 100755
index 0000000..4726fd1
--- /dev/null
+++ b/asset/static/css/maps/jquery-jvectormap-2.0.3.css
@@ -0,0 +1,135 @@
+svg {
+ touch-action: none;
+}
+
+.jvectormap-container {
+ width: 100%;
+ height: 100%;
+ position: relative;
+ overflow: hidden;
+ touch-action: none;
+}
+
+.jvectormap-tip {
+ position: absolute;
+ display: none;
+ border: solid 1px #CDCDCD;
+ border-radius: 3px;
+ background: #292929;
+ color: white;
+ font-family: sans-serif, Verdana;
+ font-size: smaller;
+ padding: 3px;
+}
+
+.jvectormap-zoomin, .jvectormap-zoomout, .jvectormap-goback {
+ position: absolute;
+ left: 10px;
+ border-radius: 3px;
+ background: #292929;
+ padding: 3px;
+ color: white;
+ cursor: pointer;
+ line-height: 10px;
+ text-align: center;
+ box-sizing: content-box;
+}
+
+.jvectormap-zoomin, .jvectormap-zoomout {
+ width: 10px;
+ height: 10px;
+}
+
+.jvectormap-zoomin {
+ top: 10px;
+}
+
+.jvectormap-zoomout {
+ top: 30px;
+}
+
+.jvectormap-goback {
+ bottom: 10px;
+ z-index: 1000;
+ padding: 6px;
+}
+
+.jvectormap-spinner {
+ position: absolute;
+ left: 0;
+ top: 0;
+ right: 0;
+ bottom: 0;
+  background: center no-repeat url(data:image/gif;base64,R0lGODlhIAAgAPMAAP///wAAAMbGxoSEhLa2tpqamjY2NlZWVtjY2OTk5Ly8vB4eHgQEBAAAAAAAAAAAACH/C05FVFNDQVBFMi4wAwEAAAAh/hpDcmVhdGVkIHdpdGggYWpheGxvYWQuaW5mbwAh+QQJCgAAACwAAAAAIAAgAAAE5xDISWlhperN52JLhSSdRgwVo1ICQZRUsiwHpTJT4iowNS8vyW2icCF6k8HMMBkCEDskxTBDAZwuAkkqIfxIQyhBQBFvAQSDITM5VDW6XNE4KagNh6Bgwe60smQUB3d4Rz1ZBApnFASDd0hihh12BkE9kjAJVlycXIg7CQIFA6SlnJ87paqbSKiKoqusnbMdmDC2tXQlkUhziYtyWTxIfy6BE8WJt5YJvpJivxNaGmLHT0VnOgSYf0dZXS7APdpB309RnHOG5gDqXGLDaC457D1zZ/V/nmOM82XiHRLYKhKP1oZmADdEAAAh+QQJCgAAACwAAAAAIAAgAAAE6hDISWlZpOrNp1lGNRSdRpDUolIGw5RUYhhHukqFu8DsrEyqnWThGvAmhVlteBvojpTDDBUEIFwMFBRAmBkSgOrBFZogCASwBDEY/CZSg7GSE0gSCjQBMVG023xWBhklAnoEdhQEfyNqMIcKjhRsjEdnezB+A4k8gTwJhFuiW4dokXiloUepBAp5qaKpp6+Ho7aWW54wl7obvEe0kRuoplCGepwSx2jJvqHEmGt6whJpGpfJCHmOoNHKaHx61WiSR92E4lbFoq+B6QDtuetcaBPnW6+O7wDHpIiK9SaVK5GgV543tzjgGcghAgAh+QQJCgAAACwAAAAAIAAgAAAE7hDISSkxpOrN5zFHNWRdhSiVoVLHspRUMoyUakyEe8PTPCATW9A14E0UvuAKMNAZKYUZCiBMuBakSQKG8G2FzUWox2AUtAQFcBKlVQoLgQReZhQlCIJesQXI5B0CBnUMOxMCenoCfTCEWBsJColTMANldx15BGs8B5wlCZ9Po6OJkwmRpnqkqnuSrayqfKmqpLajoiW5HJq7FL1Gr2mMMcKUMIiJgIemy7xZtJsTmsM4xHiKv5KMCXqfyUCJEonXPN2rAOIAmsfB3uPoAK++G+w48edZPK+M6hLJpQg484enXIdQFSS1u6UhksENEQAAIfkECQoAAAAsAAAAACAAIAAABOcQyEmpGKLqzWcZRVUQnZYg1aBSh2GUVEIQ2aQOE+G+cD4ntpWkZQj1JIiZIogDFFyHI0UxQwFugMSOFIPJftfVAEoZLBbcLEFhlQiqGp1Vd140AUklUN3eCA51C1EWMzMCezCBBmkxVIVHBWd3HHl9JQOIJSdSnJ0TDKChCwUJjoWMPaGqDKannasMo6WnM562R5YluZRwur0wpgqZE7NKUm+FNRPIhjBJxKZteWuIBMN4zRMIVIhffcgojwCF117i4nlLnY5ztRLsnOk+aV+oJY7V7m76PdkS4trKcdg0Zc0tTcKkRAAAIfkECQoAAAAsAAAAACAAIAAABO4QyEkpKqjqzScpRaVkXZWQEximw1BSCUEIlDohrft6cpKCk5xid5MNJTaAIkekKGQkWyKHkvhKsR7ARmitkAYDYRIbUQRQjWBwJRzChi9CRlBcY1UN4g0/VNB0AlcvcAYHRyZPdEQFYV8ccwR5HWxEJ02YmRMLnJ1xCYp0Y5idpQuhopmmC2KgojKasUQDk5BNAwwMOh2RtRq5uQuPZKGIJQIGwAwGf6I0JXMpC8C7kXWDBINFMxS4DKMAWVWAGYsAdNqW5uaRxkSKJOZKaU3tPOBZ4DuK2LATgJhkPJMgTwKCdFjyPHEnKxFCDhEAACH5BAkKAAAALAAAAAAgACAAAATzEMhJaVKp6s2nIkolIJ2WkBShpkVRWqqQrhLSEu9MZJKK9y1ZrqYK9WiClmvoUaF8gIQSNeF1Er4MNFn4SRSDARWroAIETg1iVwuHjYB1kYc1mwruwXKC9gmsJXliGxc+XiUCby9ydh1sOSdMkpMTBpaXBzsfhoc5l58Gm5yToAaZhaOUqjkDgCWNHAULCwOLaTmzswadEqggQwgHuQsHIoZCHQMMQgQGubVEcxOPFAcMDAYUA85eWARmfSRQCdcMe0zeP1AAygwLlJtPNAAL19DARdPzBOWSm1brJBi45soRAWQAAkrQIykShQ9wVhHCwCQCACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq+E71SRQeyqUToLA7VxF0JDyIQh/MVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiRMDjI0Fd30/iI2UA5GSS5UDj2l6NoqgOgN4gksEBgYFf0FDqKgHnyZ9OX8HrgYHdHpcHQULXAS2qKpENRg7eAMLC7kTBaixUYFkKAzWAAnLC7FLVxLWDBLKCwaKTULgEwbLA4hJtOkSBNqITT3xEgfLpBtzE/jiuL04RGEBgwWhShRgQExHBAAh+QQJCgAAACwAAAAAIAAgAAAE7xDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfZiCqGk5dTESJeaOAlClzsJsqwiJwiqnFrb2nS9kmIcgEsjQydLiIlHehhpejaIjzh9eomSjZR+ipslWIRLAgMDOR2DOqKogTB9pCUJBagDBXR6XB0EBkIIsaRsGGMMAxoDBgYHTKJiUYEGDAzHC9EACcUGkIgFzgwZ0QsSBcXHiQvOwgDdEwfFs0sDzt4S6BK4xYjkDOzn0unFeBzOBijIm1Dgmg5YFQwsCMjp1oJ8LyIAACH5BAkKAAAALAAAAAAgACAAAATwEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq+E71SRQeyqUToLA7VxF0JDyIQh/MVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GGl6NoiPOH16iZKNlH6KmyWFOggHhEEvAwwMA0N9GBsEC6amhnVcEwavDAazGwIDaH1ipaYLBUTCGgQDA8NdHz0FpqgTBwsLqAbWAAnIA4FWKdMLGdYGEgraigbT0OITBcg5QwPT4xLrROZL6AuQAPUS7bxLpoWidY0JtxLHKhwwMJBTHgPKdEQAACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq+E71SRQeyqUToLA7VxF0JDyIQh/MVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GAULDJCRiXo1CpGXDJOUjY+Yip9DhToJA4RBLwMLCwVDfRgbBAaqqoZ1XBMHswsHtxtFaH1iqaoGNgAIxRpbFAgfPQSqpbgGBqUD1wBXeCYp1AYZ19JJOYgH1KwA4UBvQwXUBxPqVD9L3sbp2BNk2xvvFPJd+MFCN6HAAIKgNggY0KtEBAAh+QQJCgAAACwAAAAAIAAgAAAE6BDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfYIDMaAFdTESJeaEDAIMxYFqrOUaNW4E4ObYcCXaiBVEgULe0NJaxxtYksjh2NLkZISgDgJhHthkpU4mW6blRiYmZOlh4JWkDqILwUGBnE6TYEbCgevr0N1gH4At7gHiRpFaLNrrq8HNgAJA70AWxQIH1+vsYMDAzZQPC9VCNkDWUhGkuE5PxJNwiUK4UfLzOlD4WvzAHaoG9nxPi5d+jYUqfAhhykOFwJWiAAAIfkECQoAAAAsAAAAACAAIAAABPAQyElpUqnqzaciSoVkXVUMFaFSwlpOCcMYlErAavhOMnNLNo8KsZsMZItJEIDIFSkLGQoQTNhIsFehRww2CQLKF0tYGKYSg+ygsZIuNqJksKgbfgIGepNo2cIUB3V1B3IvNiBYNQaDSTtfhhx0CwVPI0UJe0+bm4g5VgcGoqOcnjmjqDSdnhgEoamcsZuXO1aWQy8KAwOAuTYYGwi7w5h+Kr0SJ8MFihpNbx+4Erq7BYBuzsdiH1jCAzoSfl0rVirNbRXlBBlLX+BP0XJLAPGzTkAuAOqb0WT5AH7OcdCm5B8TgRwSRKIHQtaLCwg1RAAAOwAAAAAAAAAAAA==);
+}
+
+.jvectormap-legend-title {
+ font-weight: bold;
+ font-size: 14px;
+ text-align: center;
+}
+
+.jvectormap-legend-cnt {
+ position: absolute;
+}
+
+.jvectormap-legend-cnt-h {
+ bottom: 0;
+ right: 0;
+}
+
+.jvectormap-legend-cnt-v {
+ top: 0;
+ right: 0;
+}
+
+.jvectormap-legend {
+ background: black;
+ color: white;
+ border-radius: 3px;
+}
+
+.jvectormap-legend-cnt-h .jvectormap-legend {
+ float: left;
+ margin: 0 10px 10px 0;
+ padding: 3px 3px 1px 3px;
+}
+
+.jvectormap-legend-cnt-h .jvectormap-legend .jvectormap-legend-tick {
+ float: left;
+}
+
+.jvectormap-legend-cnt-v .jvectormap-legend {
+ margin: 10px 10px 0 0;
+ padding: 3px;
+}
+
+.jvectormap-legend-cnt-h .jvectormap-legend-tick {
+ width: 40px;
+}
+
+.jvectormap-legend-cnt-h .jvectormap-legend-tick-sample {
+ height: 15px;
+}
+
+.jvectormap-legend-cnt-v .jvectormap-legend-tick-sample {
+ height: 20px;
+ width: 20px;
+ display: inline-block;
+ vertical-align: middle;
+}
+
+.jvectormap-legend-tick-text {
+ font-size: 12px;
+}
+
+.jvectormap-legend-cnt-h .jvectormap-legend-tick-text {
+ text-align: center;
+}
+
+.jvectormap-legend-cnt-v .jvectormap-legend-tick-text {
+ display: inline-block;
+ vertical-align: middle;
+ line-height: 20px;
+ padding-left: 3px;
+}
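
The classes above style elements jVectorMap injects into its container: the tooltip, zoom buttons, loading spinner, and the legend variants. A sketch, assuming jQuery, the plugin, and a map definition file are all loaded; the container needs explicit dimensions since .jvectormap-container just fills it:

    <div id="world-map" style="width: 600px; height: 400px;"></div>
    <script>
      // 'world_mill' must match a loaded map definition script such as
      // jquery-jvectormap-world-mill.js
      $('#world-map').vectorMap({ map: 'world_mill' });
    </script>
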
diff --git a/asset/static/css/normalize.css b/asset/static/css/normalize.css
new file mode 100755
index 0000000..1f216c9
--- /dev/null
+++ b/asset/static/css/normalize.css
@@ -0,0 +1,421 @@
+/*! normalize.css v3.0.2 | MIT License | git.io/normalize */
+
+/**
+ * 1. Set default font family to sans-serif.
+ * 2. Prevent iOS text size adjust after orientation change, without disabling
+ * user zoom.
+ */
+
+html {
+ font-family: sans-serif; /* 1 */
+ -ms-text-size-adjust: 100%; /* 2 */
+ -webkit-text-size-adjust: 100%; /* 2 */
+}
+
+/**
+ * Remove default margin.
+ */
+
+body {
+ margin: 0;
+}
+
+/* HTML5 display definitions
+ ========================================================================== */
+
+/**
+ * Correct `block` display not defined for any HTML5 element in IE 8/9.
+ * Correct `block` display not defined for `details` or `summary` in IE 10/11
+ * and Firefox.
+ * Correct `block` display not defined for `main` in IE 11.
+ */
+
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+menu,
+nav,
+section,
+summary {
+ display: block;
+}
+
+/**
+ * 1. Correct `inline-block` display not defined in IE 8/9.
+ * 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.
+ */
+
+audio,
+canvas,
+progress,
+video {
+ display: inline-block; /* 1 */
+ vertical-align: baseline; /* 2 */
+}
+
+/**
+ * Prevent modern browsers from displaying `audio` without controls.
+ * Remove excess height in iOS 5 devices.
+ */
+
+audio:not([controls]) {
+ display: none;
+ height: 0;
+}
+
+/**
+ * Address `[hidden]` styling not present in IE 8/9/10.
+ * Hide the `template` element in IE 8/9/11, Safari, and Firefox < 22.
+ */
+
+[hidden],
+template {
+ display: none;
+}
+
+/* Links
+ ========================================================================== */
+
+/**
+ * Remove the gray background color from active links in IE 10.
+ */
+
+a {
+ background-color: transparent;
+}
+
+/**
+ * Improve readability when focused and also mouse hovered in all browsers.
+ */
+
+a:active,
+a:hover {
+ outline: 0;
+}
+
+/* Text-level semantics
+ ========================================================================== */
+
+/**
+ * Address styling not present in IE 8/9/10/11, Safari, and Chrome.
+ */
+
+abbr[title] {
+ border-bottom: 1px dotted;
+}
+
+/**
+ * Address style set to `bolder` in Firefox 4+, Safari, and Chrome.
+ */
+
+b,
+strong {
+ font-weight: bold;
+}
+
+/**
+ * Address styling not present in Safari and Chrome.
+ */
+
+dfn {
+ font-style: italic;
+}
+
+/**
+ * Address variable `h1` font-size and margin within `section` and `article`
+ * contexts in Firefox 4+, Safari, and Chrome.
+ */
+
+h1 {
+ font-size: 2em;
+ margin: 0.67em 0;
+}
+
+/**
+ * Address styling not present in IE 8/9.
+ */
+
+mark {
+ background: #ff0;
+ color: #000;
+}
+
+/**
+ * Address inconsistent and variable font size in all browsers.
+ */
+
+small {
+ font-size: 80%;
+}
+
+/**
+ * Prevent `sub` and `sup` affecting `line-height` in all browsers.
+ */
+
+sub,
+sup {
+ font-size: 75%;
+ line-height: 0;
+ position: relative;
+ vertical-align: baseline;
+}
+
+sup {
+ top: -0.5em;
+}
+
+sub {
+ bottom: -0.25em;
+}
+
+/* Embedded content
+ ========================================================================== */
+
+/**
+ * Remove border when inside `a` element in IE 8/9/10.
+ */
+
+img {
+ border: 0;
+}
+
+/**
+ * Correct overflow not hidden in IE 9/10/11.
+ */
+
+svg:not(:root) {
+ overflow: hidden;
+}
+
+/* Grouping content
+ ========================================================================== */
+
+/**
+ * Address margin not present in IE 8/9 and Safari.
+ */
+
+figure {
+ margin: 1em 40px;
+}
+
+/**
+ * Address differences between Firefox and other browsers.
+ */
+
+hr {
+ -moz-box-sizing: content-box;
+ box-sizing: content-box;
+ height: 0;
+}
+
+/**
+ * Contain overflow in all browsers.
+ */
+
+pre {
+ overflow: auto;
+}
+
+/**
+ * Address odd `em`-unit font size rendering in all browsers.
+ */
+
+code,
+kbd,
+pre,
+samp {
+ font-family: monospace, monospace;
+ font-size: 1em;
+}
+
+/* Forms
+ ========================================================================== */
+
+/**
+ * Known limitation: by default, Chrome and Safari on OS X allow very limited
+ * styling of `select`, unless a `border` property is set.
+ */
+
+/**
+ * 1. Correct color not being inherited.
+ * Known issue: affects color of disabled elements.
+ * 2. Correct font properties not being inherited.
+ * 3. Address margins set differently in Firefox 4+, Safari, and Chrome.
+ */
+
+button,
+input,
+optgroup,
+select,
+textarea {
+ color: inherit; /* 1 */
+ font: inherit; /* 2 */
+ margin: 0; /* 3 */
+}
+
+/**
+ * Address `overflow` set to `hidden` in IE 8/9/10/11.
+ */
+
+button {
+ overflow: visible;
+}
+
+/**
+ * Address inconsistent `text-transform` inheritance for `button` and `select`.
+ * All other form control elements do not inherit `text-transform` values.
+ * Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera.
+ * Correct `select` style inheritance in Firefox.
+ */
+
+button,
+select {
+ text-transform: none;
+}
+
+/**
+ * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`
+ * and `video` controls.
+ * 2. Correct inability to style clickable `input` types in iOS.
+ * 3. Improve usability and consistency of cursor style between image-type
+ * `input` and others.
+ */
+
+button,
+html input[type="button"], /* 1 */
+input[type="reset"],
+input[type="submit"] {
+ -webkit-appearance: button; /* 2 */
+ cursor: pointer; /* 3 */
+}
+
+/**
+ * Re-set default cursor for disabled elements.
+ */
+
+button[disabled],
+html input[disabled] {
+ cursor: default;
+}
+
+/**
+ * Remove inner padding and border in Firefox 4+.
+ */
+
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+ border: 0;
+ padding: 0;
+}
+
+/**
+ * Address Firefox 4+ setting `line-height` on `input` using `!important` in
+ * the UA stylesheet.
+ */
+
+input {
+ line-height: normal;
+}
+
+/**
+ * It's recommended that you don't attempt to style these elements.
+ * Firefox's implementation doesn't respect box-sizing, padding, or width.
+ *
+ * 1. Address box sizing set to `content-box` in IE 8/9/10.
+ * 2. Remove excess padding in IE 8/9/10.
+ */
+
+input[type="checkbox"],
+input[type="radio"] {
+ box-sizing: border-box; /* 1 */
+ padding: 0; /* 2 */
+}
+
+/**
+ * Fix the cursor style for Chrome's increment/decrement buttons. For certain
+ * `font-size` values of the `input`, it causes the cursor style of the
+ * decrement button to change from `default` to `text`.
+ */
+
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+ height: auto;
+}
+
+/**
+ * 1. Address `appearance` set to `searchfield` in Safari and Chrome.
+ * 2. Address `box-sizing` set to `border-box` in Safari and Chrome
+ * (include `-moz` to future-proof).
+ */
+
+input[type="search"] {
+ -webkit-appearance: textfield; /* 1 */
+ -moz-box-sizing: content-box;
+ -webkit-box-sizing: content-box; /* 2 */
+ box-sizing: content-box;
+}
+
+/**
+ * Remove inner padding and search cancel button in Safari and Chrome on OS X.
+ * Safari (but not Chrome) clips the cancel button when the search input has
+ * padding (and `textfield` appearance).
+ */
+
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+ -webkit-appearance: none;
+}
+
+/**
+ * Define consistent border, margin, and padding.
+ */
+
+/**
+ * 1. Correct `color` not being inherited in IE 8/9/10/11.
+ * 2. Remove padding so people aren't caught out if they zero out fieldsets.
+ */
+
+legend {
+ border: 0; /* 1 */
+ padding: 0; /* 2 */
+}
+
+/**
+ * Remove default vertical scrollbar in IE 8/9/10/11.
+ */
+
+textarea {
+ overflow: auto;
+}
+
+/**
+ * Don't inherit the `font-weight` (applied by a rule above).
+ * NOTE: the default cannot safely be changed in Chrome and Safari on OS X.
+ */
+
+optgroup {
+ font-weight: bold;
+}
+
+/* Tables
+ ========================================================================== */
+
+/**
+ * Remove most spacing between table cells.
+ */
+
+table {
+ border-collapse: collapse;
+ border-spacing: 0;
+}
+
+td,
+th {
+ padding: 0;
+}
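
normalize.css only sets baseline defaults, so it should be linked before any project styles: at equal specificity the later rule wins, which lets project sheets override it. A sketch of the include order (custom.css is a hypothetical project sheet, not a file in this commit):

    <!-- order matters: normalize first, project overrides after -->
    <link rel="stylesheet" href="/static/css/normalize.css">
    <link rel="stylesheet" href="/static/css/custom.css">
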
diff --git a/asset/static/css/nprogress.css b/asset/static/css/nprogress.css
new file mode 100755
index 0000000..6752d7f
--- /dev/null
+++ b/asset/static/css/nprogress.css
@@ -0,0 +1,74 @@
+/* Make clicks pass-through */
+#nprogress {
+ pointer-events: none;
+}
+
+#nprogress .bar {
+ background: #29d;
+
+ position: fixed;
+ z-index: 1031;
+ top: 0;
+ left: 0;
+
+ width: 100%;
+ height: 2px;
+}
+
+/* Fancy blur effect */
+#nprogress .peg {
+ display: block;
+ position: absolute;
+ right: 0px;
+ width: 100px;
+ height: 100%;
+ box-shadow: 0 0 10px #29d, 0 0 5px #29d;
+ opacity: 1.0;
+
+ -webkit-transform: rotate(3deg) translate(0px, -4px);
+ -ms-transform: rotate(3deg) translate(0px, -4px);
+ transform: rotate(3deg) translate(0px, -4px);
+}
+
+/* Remove these to get rid of the spinner */
+#nprogress .spinner {
+ display: block;
+ position: fixed;
+ z-index: 1031;
+ top: 15px;
+ right: 15px;
+}
+
+#nprogress .spinner-icon {
+ width: 18px;
+ height: 18px;
+ box-sizing: border-box;
+
+ border: solid 2px transparent;
+ border-top-color: #29d;
+ border-left-color: #29d;
+ border-radius: 50%;
+
+ -webkit-animation: nprogress-spinner 400ms linear infinite;
+ animation: nprogress-spinner 400ms linear infinite;
+}
+
+.nprogress-custom-parent {
+ overflow: hidden;
+ position: relative;
+}
+
+.nprogress-custom-parent #nprogress .spinner,
+.nprogress-custom-parent #nprogress .bar {
+ position: absolute;
+}
+
+@-webkit-keyframes nprogress-spinner {
+ 0% { -webkit-transform: rotate(0deg); }
+ 100% { -webkit-transform: rotate(360deg); }
+}
+@keyframes nprogress-spinner {
+ 0% { transform: rotate(0deg); }
+ 100% { transform: rotate(360deg); }
+}
+
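
The #nprogress .bar and .spinner elements these rules style do not exist in the page markup; the NProgress library injects them on start and removes them on completion. A stock API sketch (nprogress.js assumed loaded):

    <script>
      NProgress.start();   // injects #nprogress with the .bar (and .spinner)
      NProgress.set(0.6);  // move the bar to 60%
      NProgress.done();    // finish and remove the injected elements
      // NProgress.configure({ showSpinner: false }); // alternative to deleting
      //                                              // the .spinner rules above
    </script>
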
diff --git a/asset/static/css/progressbar/bootstrap-progressbar-3.3.0.css b/asset/static/css/progressbar/bootstrap-progressbar-3.3.0.css
new file mode 100755
index 0000000..5f1a7e6
--- /dev/null
+++ b/asset/static/css/progressbar/bootstrap-progressbar-3.3.0.css
@@ -0,0 +1,161 @@
+/*! bootstrap-progressbar v0.8.4 | Copyright (c) 2012-2014 Stephan Groß | MIT license | http://www.minddust.com */
+@-webkit-keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+@keyframes progress-bar-stripes {
+ from {
+ background-position: 40px 0;
+ }
+ to {
+ background-position: 0 0;
+ }
+}
+
+.progress{border-radius:0; margin-bottom: 18px; }
+.progress.right .progress-bar{
+ float:right; right:0;
+}
+.progress.vertical {
+ width: 40px;
+}
+.progress.progress_sm{border-radius:0; margin-bottom: 18px; height: 10px !important; }
+.progress.progress_sm .progress-bar{height: 10px !important; }
+.dashboard_graph p {margin: 0 0 4px; }
+ul.verticle_bars{
+ width: 100%;
+}
+ul.verticle_bars li{
+ width: 23%; height:200px; margin: 0;
+}
+
+
+.progress {
+ overflow: hidden;
+ height: 20px;
+ margin-bottom: 20px;
+ background-color: #f5f5f5;
+ -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
+ box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
+}
+.progress-bar {
+ float: left;
+ width: 0%;
+ height: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ color: #ffffff;
+ text-align: center;
+ background-color: #428bca;
+ -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
+ box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
+ -webkit-transition: width 0.6s ease;
+ -o-transition: width 0.6s ease;
+ transition: width 0.6s ease;
+}
+.progress-striped .progress-bar,
+.progress-bar-striped {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-size: 40px 40px;
+}
+.progress.active .progress-bar,
+.progress-bar.active {
+ -webkit-animation: progress-bar-stripes 2s linear infinite;
+ -o-animation: progress-bar-stripes 2s linear infinite;
+ animation: progress-bar-stripes 2s linear infinite;
+}
+.progress-bar-success {
+ background-color: #26B99A;
+}
+.progress-striped .progress-bar-success {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.progress-bar-info {
+ background-color: #3498DB;
+}
+.progress-striped .progress-bar-info {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.progress-bar-warning {
+ background-color: #F39C12;
+}
+.progress-striped .progress-bar-warning {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.progress-bar-danger {
+ background-color: #d9534f;
+}
+.progress-striped .progress-bar-danger {
+ background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+ background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
+}
+.progress {
+ position: relative;
+}
+.progress .progress-bar {
+ position: absolute;
+ overflow: hidden;
+ line-height: 20px;
+}
+.progress .progressbar-back-text {
+ position: absolute;
+ width: 100%;
+ height: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ text-align: center;
+}
+.progress .progressbar-front-text {
+ display: block;
+ width: 100%;
+ font-size: 12px;
+ line-height: 20px;
+ text-align: center;
+}
+.progress.right .progress-bar {
+ right: 0;
+}
+.progress.right .progressbar-front-text {
+ position: absolute;
+ right: 0;
+}
+.progress.vertical {
+ width: 20px;
+ height: 100%;
+ float: left;
+ margin-right: 10px;
+}
+.progress.vertical.progress_wide{
+ width: 35px;
+}
+.progress.vertical.bottom {
+ position: relative;
+}
+.progress.vertical.bottom .progressbar-front-text {
+ position: absolute;
+ bottom: 0;
+}
+.progress.vertical .progress-bar {
+ width: 100%;
+ height: 0;
+ -webkit-transition: height 0.6s ease;
+ -o-transition: height 0.6s ease;
+ transition: height 0.6s ease;
+}
+.progress.vertical.bottom .progress-bar {
+ position: absolute;
+ bottom: 0;
+}
diff --git a/asset/static/css/select/select2.min.css b/asset/static/css/select/select2.min.css
new file mode 100755
index 0000000..e31adef
--- /dev/null
+++ b/asset/static/css/select/select2.min.css
@@ -0,0 +1,404 @@
+.select2-container {
+ box-sizing: border-box;
+ display: inline-block;
+ margin: 0;
+ position: relative;
+ vertical-align: middle; }
+ .select2-container .select2-selection--single {
+ box-sizing: border-box;
+ cursor: pointer;
+ display: block;
+ user-select: none;
+ -webkit-user-select: none; }
+ .select2-container .select2-selection--single .select2-selection__rendered {
+ display: block;
+ overflow: hidden;
+ padding-left: 8px;
+ padding-right: 20px;
+ text-overflow: ellipsis; }
+ .select2-container[dir="rtl"] .select2-selection--single .select2-selection__rendered {
+ padding-right: 8px;
+ padding-left: 20px; }
+ .select2-container .select2-selection--multiple {
+ box-sizing: border-box;
+ cursor: pointer;
+ display: block;
+ min-height: 38px;
+ user-select: none;
+ -webkit-user-select: none; }
+ .select2-container .select2-selection--multiple .select2-selection__rendered {
+ display: inline-block;
+ overflow: hidden;
+ padding-left: 8px;
+ text-overflow: ellipsis; }
+ .select2-container .select2-search--inline {
+ float: left; }
+ .select2-container .select2-search--inline .select2-search__field {
+ border: none;
+ font-size: 100%;
+ margin-top: 10px; }
+
+.select2-dropdown {
+ background-color: white;
+ border: 1px solid #aaa;
+ border-radius: 4px;
+ box-sizing: border-box;
+ display: block;
+ position: absolute;
+ left: -100000px;
+ width: 100%;
+ z-index: 1051; }
+
+.select2-results {
+ display: block; }
+
+.select2-results__options {
+ list-style: none;
+ margin: 0;
+ padding: 0; }
+
+.select2-results__option {
+ padding: 6px;
+ user-select: none;
+ -webkit-user-select: none; }
+ .select2-results__option[aria-selected] {
+ cursor: pointer; }
+
+.select2-container--open .select2-dropdown {
+ left: 0; }
+
+.select2-container--open .select2-dropdown--above {
+ border-bottom: none;
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0; }
+
+.select2-container--open .select2-dropdown--below {
+ border-top: none;
+ border-top-left-radius: 0;
+ border-top-right-radius: 0; }
+
+.select2-search--dropdown {
+ display: block;
+ padding: 4px; }
+ .select2-search--dropdown .select2-search__field {
+ padding: 4px;
+ width: 100%;
+ box-sizing: border-box; }
+ .select2-search--dropdown.select2-search--hide {
+ display: none; }
+
+.select2-close-mask {
+ border: 0;
+ margin: 0;
+ padding: 0;
+ display: block;
+ position: fixed;
+ left: 0;
+ top: 0;
+ min-height: 100%;
+ min-width: 100%;
+ height: auto;
+ width: auto;
+ opacity: 0;
+ z-index: 99;
+ background-color: #fff;
+ filter: alpha(opacity=0); }
+
+.select2-container--default .select2-selection--single {
+ background-color: #fff;
+  border: 1px solid #ccc; }
+ .select2-container--default .select2-selection--single .select2-selection__rendered {
+ line-height: 38px; }
+ .select2-container--default .select2-selection--single .select2-selection__clear {
+ cursor: pointer;
+ float: right;
+ font-weight: bold; }
+ .select2-container--default .select2-selection--single .select2-selection__placeholder {
+ color: #999; }
+ .select2-container--default .select2-selection--single .select2-selection__arrow {
+ height: 26px;
+ position: absolute;
+ top: 1px;
+ right: 1px;
+ width: 20px; }
+ .select2-container--default .select2-selection--single .select2-selection__arrow b {
+ border-color: #888 transparent transparent transparent;
+ border-style: solid;
+ border-width: 5px 4px 0 4px;
+ height: 0;
+ left: 50%;
+ margin-left: -4px;
+ margin-top: -2px;
+ position: absolute;
+ top: 50%;
+ width: 0; }
+.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__clear {
+ float: left; }
+.select2-container--default[dir="rtl"] .select2-selection--single .select2-selection__arrow {
+ left: 1px;
+ right: auto; }
+.select2-container--default.select2-container--disabled .select2-selection--single {
+ background-color: #eee;
+ cursor: default; }
+ .select2-container--default.select2-container--disabled .select2-selection--single .select2-selection__clear {
+ display: none; }
+.select2-container--default.select2-container--open .select2-selection--single .select2-selection__arrow b {
+ border-color: transparent transparent #888 transparent;
+ border-width: 0 4px 5px 4px; }
+.select2-container--default .select2-selection--multiple {
+ background-color: white;
+ border: 1px solid #ccc;
+ cursor: text; }
+ .select2-container--default .select2-selection--multiple .select2-selection__rendered {
+ list-style: none;
+ margin: 0;
+ padding: 0 5px;
+ width: 100%; }
+ .select2-container--default .select2-selection--multiple .select2-selection__placeholder {
+ color: #999;
+ margin-top: 5px;
+ float: left; }
+ .select2-container--default .select2-selection--multiple .select2-selection__clear {
+ cursor: pointer;
+ float: right;
+ font-weight: bold;
+ margin-top: 5px;
+ margin-right: 10px; }
+ .select2-container--default .select2-selection--multiple .select2-selection__choice {
+ background-color: #e4e4e4;
+ cursor: default;
+ float: left;
+ margin-right: 5px;
+ margin-top: 5px;
+ padding: 3px 5px; }
+ .select2-container--default .select2-selection--multiple .select2-selection__choice__remove {
+ color: #999;
+ cursor: pointer;
+ display: inline-block;
+ font-weight: bold;
+ margin-right: 2px; }
+ .select2-container--default .select2-selection--multiple .select2-selection__choice__remove:hover {
+ color: #333; }
+.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice, .select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__placeholder {
+ float: right; }
+.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice {
+ margin-left: 5px;
+ margin-right: auto; }
+.select2-container--default[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove {
+ margin-left: 2px;
+ margin-right: auto; }
+.select2-container--default.select2-container--disabled .select2-selection--multiple {
+ background-color: #eee;
+ cursor: default; }
+.select2-container--default.select2-container--disabled .select2-selection__choice__remove {
+ display: none; }
+.select2-container--default.select2-container--open.select2-container--above .select2-selection--single, .select2-container--default.select2-container--open.select2-container--above .select2-selection--multiple {
+ border-top-left-radius: 0;
+ border-top-right-radius: 0; }
+.select2-container--default.select2-container--open.select2-container--below .select2-selection--single, .select2-container--default.select2-container--open.select2-container--below .select2-selection--multiple {
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0; }
+.select2-container--default .select2-search--dropdown .select2-search__field {
+ border: 1px solid #aaa; }
+.select2-container--default .select2-search--inline .select2-search__field {
+ background: transparent;
+ border: none;
+ outline: 0; }
+.select2-container--default .select2-results > .select2-results__options {
+ max-height: 200px;
+ overflow-y: auto; }
+.select2-container--default .select2-results__option[role=group] {
+ padding: 0; }
+.select2-container--default .select2-results__option[aria-disabled=true] {
+ color: #999; }
+.select2-container--default .select2-results__option[aria-selected=true] {
+ background-color: #ddd; }
+.select2-container--default .select2-results__option .select2-results__option {
+ padding-left: 1em; }
+ .select2-container--default .select2-results__option .select2-results__option .select2-results__group {
+ padding-left: 0; }
+ .select2-container--default .select2-results__option .select2-results__option .select2-results__option {
+ margin-left: -1em;
+ padding-left: 2em; }
+ .select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
+ margin-left: -2em;
+ padding-left: 3em; }
+ .select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
+ margin-left: -3em;
+ padding-left: 4em; }
+ .select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
+ margin-left: -4em;
+ padding-left: 5em; }
+ .select2-container--default .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option .select2-results__option {
+ margin-left: -5em;
+ padding-left: 6em; }
+.select2-container--default .select2-results__option--highlighted[aria-selected] {
+ background-color: #5897fb;
+ color: white; }
+.select2-container--default .select2-results__group {
+ cursor: default;
+ display: block;
+ padding: 6px; }
+
+.select2-container--classic .select2-selection--single {
+ background-color: #f6f6f6;
+ border: 1px solid #aaa;
+ border-radius: 4px;
+ outline: 0;
+ background-image: -webkit-linear-gradient(top, #ffffff 50%, #eeeeee 100%);
+ background-image: -o-linear-gradient(top, #ffffff 50%, #eeeeee 100%);
+ background-image: linear-gradient(to bottom, #ffffff 50%, #eeeeee 100%);
+ background-repeat: repeat-x;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#eeeeee', GradientType=0); }
+ .select2-container--classic .select2-selection--single:focus {
+ border: 1px solid #5897fb; }
+ .select2-container--classic .select2-selection--single .select2-selection__rendered {
+ color: #444;
+ line-height: 28px; }
+ .select2-container--classic .select2-selection--single .select2-selection__clear {
+ cursor: pointer;
+ float: right;
+ font-weight: bold;
+ margin-right: 10px; }
+ .select2-container--classic .select2-selection--single .select2-selection__placeholder {
+ color: #999; }
+ .select2-container--classic .select2-selection--single .select2-selection__arrow {
+ background-color: #ddd;
+ border: none;
+ border-left: 1px solid #aaa;
+ border-top-right-radius: 4px;
+ border-bottom-right-radius: 4px;
+ height: 26px;
+ position: absolute;
+ top: 1px;
+ right: 1px;
+ width: 20px;
+ background-image: -webkit-linear-gradient(top, #eeeeee 50%, #cccccc 100%);
+ background-image: -o-linear-gradient(top, #eeeeee 50%, #cccccc 100%);
+ background-image: linear-gradient(to bottom, #eeeeee 50%, #cccccc 100%);
+ background-repeat: repeat-x;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#eeeeee', endColorstr='#cccccc', GradientType=0); }
+ .select2-container--classic .select2-selection--single .select2-selection__arrow b {
+ border-color: #888 transparent transparent transparent;
+ border-style: solid;
+ border-width: 5px 4px 0 4px;
+ height: 0;
+ left: 50%;
+ margin-left: -4px;
+ margin-top: -2px;
+ position: absolute;
+ top: 50%;
+ width: 0; }
+.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__clear {
+ float: left; }
+.select2-container--classic[dir="rtl"] .select2-selection--single .select2-selection__arrow {
+ border: none;
+ border-right: 1px solid #aaa;
+ border-radius: 0;
+ border-top-left-radius: 4px;
+ border-bottom-left-radius: 4px;
+ left: 1px;
+ right: auto; }
+.select2-container--classic.select2-container--open .select2-selection--single {
+ border: 1px solid #5897fb; }
+ .select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow {
+ background: transparent;
+ border: none; }
+ .select2-container--classic.select2-container--open .select2-selection--single .select2-selection__arrow b {
+ border-color: transparent transparent #888 transparent;
+ border-width: 0 4px 5px 4px; }
+.select2-container--classic.select2-container--open.select2-container--above .select2-selection--single {
+ border-top: none;
+ border-top-left-radius: 0;
+ border-top-right-radius: 0;
+ background-image: -webkit-linear-gradient(top, #ffffff 0%, #eeeeee 50%);
+ background-image: -o-linear-gradient(top, #ffffff 0%, #eeeeee 50%);
+ background-image: linear-gradient(to bottom, #ffffff 0%, #eeeeee 50%);
+ background-repeat: repeat-x;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#eeeeee', GradientType=0); }
+.select2-container--classic.select2-container--open.select2-container--below .select2-selection--single {
+ border-bottom: none;
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0;
+ background-image: -webkit-linear-gradient(top, #eeeeee 50%, #ffffff 100%);
+ background-image: -o-linear-gradient(top, #eeeeee 50%, #ffffff 100%);
+ background-image: linear-gradient(to bottom, #eeeeee 50%, #ffffff 100%);
+ background-repeat: repeat-x;
+ filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#eeeeee', endColorstr='#ffffff', GradientType=0); }
+.select2-container--classic .select2-selection--multiple {
+ background-color: white;
+ border: 1px solid #aaa;
+ border-radius: 4px;
+ cursor: text;
+ outline: 0; }
+ .select2-container--classic .select2-selection--multiple:focus {
+ border: 1px solid #5897fb; }
+ .select2-container--classic .select2-selection--multiple .select2-selection__rendered {
+ list-style: none;
+ margin: 0;
+ padding: 0 5px; }
+ .select2-container--classic .select2-selection--multiple .select2-selection__clear {
+ display: none; }
+ .select2-container--classic .select2-selection--multiple .select2-selection__choice {
+ background-color: #e4e4e4;
+ border: 1px solid #aaa;
+ border-radius: 4px;
+ cursor: default;
+ float: left;
+ margin-right: 5px;
+ margin-top: 5px;
+ padding: 0 5px; }
+ .select2-container--classic .select2-selection--multiple .select2-selection__choice__remove {
+ color: #888;
+ cursor: pointer;
+ display: inline-block;
+ font-weight: bold;
+ margin-right: 2px; }
+ .select2-container--classic .select2-selection--multiple .select2-selection__choice__remove:hover {
+ color: #555; }
+.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice {
+ float: right; }
+.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice {
+ margin-left: 5px;
+ margin-right: auto; }
+.select2-container--classic[dir="rtl"] .select2-selection--multiple .select2-selection__choice__remove {
+ margin-left: 2px;
+ margin-right: auto; }
+.select2-container--classic.select2-container--open .select2-selection--multiple {
+ border: 1px solid #5897fb; }
+.select2-container--classic.select2-container--open.select2-container--above .select2-selection--multiple {
+ border-top: none;
+ border-top-left-radius: 0;
+ border-top-right-radius: 0; }
+.select2-container--classic.select2-container--open.select2-container--below .select2-selection--multiple {
+ border-bottom: none;
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0; }
+.select2-container--classic .select2-search--dropdown .select2-search__field {
+ border: 1px solid #aaa;
+ outline: 0; }
+.select2-container--classic .select2-search--inline .select2-search__field {
+ outline: 0; }
+.select2-container--classic .select2-dropdown {
+ background-color: white;
+ border: 1px solid transparent; }
+.select2-container--classic .select2-dropdown--above {
+ border-bottom: none; }
+.select2-container--classic .select2-dropdown--below {
+ border-top: none; }
+.select2-container--classic .select2-results > .select2-results__options {
+ max-height: 200px;
+ overflow-y: auto; }
+.select2-container--classic .select2-results__option[role=group] {
+ padding: 0; }
+.select2-container--classic .select2-results__option[aria-disabled=true] {
+ color: grey; }
+.select2-container--classic .select2-results__option--highlighted[aria-selected] {
+ background-color: #3875d7;
+ color: white; }
+.select2-container--classic .select2-results__group {
+ cursor: default;
+ display: block;
+ padding: 6px; }
+.select2-container--classic.select2-container--open .select2-dropdown {
+ border-color: #5897fb; }
\ No newline at end of file
diff --git a/asset/static/css/switchery/switchery.min.css b/asset/static/css/switchery/switchery.min.css
new file mode 100755
index 0000000..a4a2f74
--- /dev/null
+++ b/asset/static/css/switchery/switchery.min.css
@@ -0,0 +1 @@
+.switchery{background-color:#fff;border:1px solid #dfdfdf;border-radius:20px;cursor:pointer;display:inline-block;height:20px;position:relative;vertical-align:middle;width:32px;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}.switchery>small{background:#fff;border-radius:100%;box-shadow:0 1px 3px rgba(0,0,0,0.4);height:20px;position:absolute;top:0;width:20px}
\ No newline at end of file
diff --git a/asset/static/fonts/css/font-awesome.css b/asset/static/fonts/css/font-awesome.css
new file mode 100755
index 0000000..b2a5fe2
--- /dev/null
+++ b/asset/static/fonts/css/font-awesome.css
@@ -0,0 +1,2086 @@
+/*!
+ * Font Awesome 4.5.0 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+/* FONT PATH
+ * -------------------------- */
+@font-face {
+ font-family: 'FontAwesome';
+ src: url('../fonts/fontawesome-webfont.eot?v=4.5.0');
+ src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.5.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.5.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.5.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.5.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.5.0#fontawesomeregular') format('svg');
+ font-weight: normal;
+ font-style: normal;
+}
+.fa {
+ display: inline-block;
+ font: normal normal normal 14px/1 FontAwesome;
+ font-size: inherit;
+ text-rendering: auto;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+/* makes the font 33% larger relative to the icon container */
+.fa-lg {
+ font-size: 1.33333333em;
+ line-height: 0.75em;
+ vertical-align: -15%;
+}
+.fa-2x {
+ font-size: 2em;
+}
+.fa-3x {
+ font-size: 3em;
+}
+.fa-4x {
+ font-size: 4em;
+}
+.fa-5x {
+ font-size: 5em;
+}
+.fa-fw {
+ width: 1.28571429em;
+ text-align: center;
+}
+.fa-ul {
+ padding-left: 0;
+ margin-left: 2.14285714em;
+ list-style-type: none;
+}
+.fa-ul > li {
+ position: relative;
+}
+.fa-li {
+ position: absolute;
+ left: -2.14285714em;
+ width: 2.14285714em;
+ top: 0.14285714em;
+ text-align: center;
+}
+.fa-li.fa-lg {
+ left: -1.85714286em;
+}
+.fa-border {
+ padding: .2em .25em .15em;
+ border: solid 0.08em #eeeeee;
+ border-radius: .1em;
+}
+.fa-pull-left {
+ float: left;
+}
+.fa-pull-right {
+ float: right;
+}
+.fa.fa-pull-left {
+ margin-right: .3em;
+}
+.fa.fa-pull-right {
+ margin-left: .3em;
+}
+/* Deprecated as of 4.4.0 */
+.pull-right {
+ float: right;
+}
+.pull-left {
+ float: left;
+}
+.fa.pull-left {
+ margin-right: .3em;
+}
+.fa.pull-right {
+ margin-left: .3em;
+}
+.fa-spin {
+ -webkit-animation: fa-spin 2s infinite linear;
+ animation: fa-spin 2s infinite linear;
+}
+.fa-pulse {
+ -webkit-animation: fa-spin 1s infinite steps(8);
+ animation: fa-spin 1s infinite steps(8);
+}
+@-webkit-keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
+@keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
+.fa-rotate-90 {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
+ -webkit-transform: rotate(90deg);
+ -ms-transform: rotate(90deg);
+ transform: rotate(90deg);
+}
+.fa-rotate-180 {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
+ -webkit-transform: rotate(180deg);
+ -ms-transform: rotate(180deg);
+ transform: rotate(180deg);
+}
+.fa-rotate-270 {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
+ -webkit-transform: rotate(270deg);
+ -ms-transform: rotate(270deg);
+ transform: rotate(270deg);
+}
+.fa-flip-horizontal {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
+ -webkit-transform: scale(-1, 1);
+ -ms-transform: scale(-1, 1);
+ transform: scale(-1, 1);
+}
+.fa-flip-vertical {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
+ -webkit-transform: scale(1, -1);
+ -ms-transform: scale(1, -1);
+ transform: scale(1, -1);
+}
+:root .fa-rotate-90,
+:root .fa-rotate-180,
+:root .fa-rotate-270,
+:root .fa-flip-horizontal,
+:root .fa-flip-vertical {
+ filter: none;
+}
+.fa-stack {
+ position: relative;
+ display: inline-block;
+ width: 2em;
+ height: 2em;
+ line-height: 2em;
+ vertical-align: middle;
+}
+.fa-stack-1x,
+.fa-stack-2x {
+ position: absolute;
+ left: 0;
+ width: 100%;
+ text-align: center;
+}
+.fa-stack-1x {
+ line-height: inherit;
+}
+.fa-stack-2x {
+ font-size: 2em;
+}
+.fa-inverse {
+ color: #ffffff;
+}
+/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
+ readers do not read off random characters that represent icons */
+.fa-glass:before {
+ content: "\f000";
+}
+.fa-music:before {
+ content: "\f001";
+}
+.fa-search:before {
+ content: "\f002";
+}
+.fa-envelope-o:before {
+ content: "\f003";
+}
+.fa-heart:before {
+ content: "\f004";
+}
+.fa-star:before {
+ content: "\f005";
+}
+.fa-star-o:before {
+ content: "\f006";
+}
+.fa-user:before {
+ content: "\f007";
+}
+.fa-film:before {
+ content: "\f008";
+}
+.fa-th-large:before {
+ content: "\f009";
+}
+.fa-th:before {
+ content: "\f00a";
+}
+.fa-th-list:before {
+ content: "\f00b";
+}
+.fa-check:before {
+ content: "\f00c";
+}
+.fa-remove:before,
+.fa-close:before,
+.fa-times:before {
+ content: "\f00d";
+}
+.fa-search-plus:before {
+ content: "\f00e";
+}
+.fa-search-minus:before {
+ content: "\f010";
+}
+.fa-power-off:before {
+ content: "\f011";
+}
+.fa-signal:before {
+ content: "\f012";
+}
+.fa-gear:before,
+.fa-cog:before {
+ content: "\f013";
+}
+.fa-trash-o:before {
+ content: "\f014";
+}
+.fa-home:before {
+ content: "\f015";
+}
+.fa-file-o:before {
+ content: "\f016";
+}
+.fa-clock-o:before {
+ content: "\f017";
+}
+.fa-road:before {
+ content: "\f018";
+}
+.fa-download:before {
+ content: "\f019";
+}
+.fa-arrow-circle-o-down:before {
+ content: "\f01a";
+}
+.fa-arrow-circle-o-up:before {
+ content: "\f01b";
+}
+.fa-inbox:before {
+ content: "\f01c";
+}
+.fa-play-circle-o:before {
+ content: "\f01d";
+}
+.fa-rotate-right:before,
+.fa-repeat:before {
+ content: "\f01e";
+}
+.fa-refresh:before {
+ content: "\f021";
+}
+.fa-list-alt:before {
+ content: "\f022";
+}
+.fa-lock:before {
+ content: "\f023";
+}
+.fa-flag:before {
+ content: "\f024";
+}
+.fa-headphones:before {
+ content: "\f025";
+}
+.fa-volume-off:before {
+ content: "\f026";
+}
+.fa-volume-down:before {
+ content: "\f027";
+}
+.fa-volume-up:before {
+ content: "\f028";
+}
+.fa-qrcode:before {
+ content: "\f029";
+}
+.fa-barcode:before {
+ content: "\f02a";
+}
+.fa-tag:before {
+ content: "\f02b";
+}
+.fa-tags:before {
+ content: "\f02c";
+}
+.fa-book:before {
+ content: "\f02d";
+}
+.fa-bookmark:before {
+ content: "\f02e";
+}
+.fa-print:before {
+ content: "\f02f";
+}
+.fa-camera:before {
+ content: "\f030";
+}
+.fa-font:before {
+ content: "\f031";
+}
+.fa-bold:before {
+ content: "\f032";
+}
+.fa-italic:before {
+ content: "\f033";
+}
+.fa-text-height:before {
+ content: "\f034";
+}
+.fa-text-width:before {
+ content: "\f035";
+}
+.fa-align-left:before {
+ content: "\f036";
+}
+.fa-align-center:before {
+ content: "\f037";
+}
+.fa-align-right:before {
+ content: "\f038";
+}
+.fa-align-justify:before {
+ content: "\f039";
+}
+.fa-list:before {
+ content: "\f03a";
+}
+.fa-dedent:before,
+.fa-outdent:before {
+ content: "\f03b";
+}
+.fa-indent:before {
+ content: "\f03c";
+}
+.fa-video-camera:before {
+ content: "\f03d";
+}
+.fa-photo:before,
+.fa-image:before,
+.fa-picture-o:before {
+ content: "\f03e";
+}
+.fa-pencil:before {
+ content: "\f040";
+}
+.fa-map-marker:before {
+ content: "\f041";
+}
+.fa-adjust:before {
+ content: "\f042";
+}
+.fa-tint:before {
+ content: "\f043";
+}
+.fa-edit:before,
+.fa-pencil-square-o:before {
+ content: "\f044";
+}
+.fa-share-square-o:before {
+ content: "\f045";
+}
+.fa-check-square-o:before {
+ content: "\f046";
+}
+.fa-arrows:before {
+ content: "\f047";
+}
+.fa-step-backward:before {
+ content: "\f048";
+}
+.fa-fast-backward:before {
+ content: "\f049";
+}
+.fa-backward:before {
+ content: "\f04a";
+}
+.fa-play:before {
+ content: "\f04b";
+}
+.fa-pause:before {
+ content: "\f04c";
+}
+.fa-stop:before {
+ content: "\f04d";
+}
+.fa-forward:before {
+ content: "\f04e";
+}
+.fa-fast-forward:before {
+ content: "\f050";
+}
+.fa-step-forward:before {
+ content: "\f051";
+}
+.fa-eject:before {
+ content: "\f052";
+}
+.fa-chevron-left:before {
+ content: "\f053";
+}
+.fa-chevron-right:before {
+ content: "\f054";
+}
+.fa-plus-circle:before {
+ content: "\f055";
+}
+.fa-minus-circle:before {
+ content: "\f056";
+}
+.fa-times-circle:before {
+ content: "\f057";
+}
+.fa-check-circle:before {
+ content: "\f058";
+}
+.fa-question-circle:before {
+ content: "\f059";
+}
+.fa-info-circle:before {
+ content: "\f05a";
+}
+.fa-crosshairs:before {
+ content: "\f05b";
+}
+.fa-times-circle-o:before {
+ content: "\f05c";
+}
+.fa-check-circle-o:before {
+ content: "\f05d";
+}
+.fa-ban:before {
+ content: "\f05e";
+}
+.fa-arrow-left:before {
+ content: "\f060";
+}
+.fa-arrow-right:before {
+ content: "\f061";
+}
+.fa-arrow-up:before {
+ content: "\f062";
+}
+.fa-arrow-down:before {
+ content: "\f063";
+}
+.fa-mail-forward:before,
+.fa-share:before {
+ content: "\f064";
+}
+.fa-expand:before {
+ content: "\f065";
+}
+.fa-compress:before {
+ content: "\f066";
+}
+.fa-plus:before {
+ content: "\f067";
+}
+.fa-minus:before {
+ content: "\f068";
+}
+.fa-asterisk:before {
+ content: "\f069";
+}
+.fa-exclamation-circle:before {
+ content: "\f06a";
+}
+.fa-gift:before {
+ content: "\f06b";
+}
+.fa-leaf:before {
+ content: "\f06c";
+}
+.fa-fire:before {
+ content: "\f06d";
+}
+.fa-eye:before {
+ content: "\f06e";
+}
+.fa-eye-slash:before {
+ content: "\f070";
+}
+.fa-warning:before,
+.fa-exclamation-triangle:before {
+ content: "\f071";
+}
+.fa-plane:before {
+ content: "\f072";
+}
+.fa-calendar:before {
+ content: "\f073";
+}
+.fa-random:before {
+ content: "\f074";
+}
+.fa-comment:before {
+ content: "\f075";
+}
+.fa-magnet:before {
+ content: "\f076";
+}
+.fa-chevron-up:before {
+ content: "\f077";
+}
+.fa-chevron-down:before {
+ content: "\f078";
+}
+.fa-retweet:before {
+ content: "\f079";
+}
+.fa-shopping-cart:before {
+ content: "\f07a";
+}
+.fa-folder:before {
+ content: "\f07b";
+}
+.fa-folder-open:before {
+ content: "\f07c";
+}
+.fa-arrows-v:before {
+ content: "\f07d";
+}
+.fa-arrows-h:before {
+ content: "\f07e";
+}
+.fa-bar-chart-o:before,
+.fa-bar-chart:before {
+ content: "\f080";
+}
+.fa-twitter-square:before {
+ content: "\f081";
+}
+.fa-facebook-square:before {
+ content: "\f082";
+}
+.fa-camera-retro:before {
+ content: "\f083";
+}
+.fa-key:before {
+ content: "\f084";
+}
+.fa-gears:before,
+.fa-cogs:before {
+ content: "\f085";
+}
+.fa-comments:before {
+ content: "\f086";
+}
+.fa-thumbs-o-up:before {
+ content: "\f087";
+}
+.fa-thumbs-o-down:before {
+ content: "\f088";
+}
+.fa-star-half:before {
+ content: "\f089";
+}
+.fa-heart-o:before {
+ content: "\f08a";
+}
+.fa-sign-out:before {
+ content: "\f08b";
+}
+.fa-linkedin-square:before {
+ content: "\f08c";
+}
+.fa-thumb-tack:before {
+ content: "\f08d";
+}
+.fa-external-link:before {
+ content: "\f08e";
+}
+.fa-sign-in:before {
+ content: "\f090";
+}
+.fa-trophy:before {
+ content: "\f091";
+}
+.fa-github-square:before {
+ content: "\f092";
+}
+.fa-upload:before {
+ content: "\f093";
+}
+.fa-lemon-o:before {
+ content: "\f094";
+}
+.fa-phone:before {
+ content: "\f095";
+}
+.fa-square-o:before {
+ content: "\f096";
+}
+.fa-bookmark-o:before {
+ content: "\f097";
+}
+.fa-phone-square:before {
+ content: "\f098";
+}
+.fa-twitter:before {
+ content: "\f099";
+}
+.fa-facebook-f:before,
+.fa-facebook:before {
+ content: "\f09a";
+}
+.fa-github:before {
+ content: "\f09b";
+}
+.fa-unlock:before {
+ content: "\f09c";
+}
+.fa-credit-card:before {
+ content: "\f09d";
+}
+.fa-feed:before,
+.fa-rss:before {
+ content: "\f09e";
+}
+.fa-hdd-o:before {
+ content: "\f0a0";
+}
+.fa-bullhorn:before {
+ content: "\f0a1";
+}
+.fa-bell:before {
+ content: "\f0f3";
+}
+.fa-certificate:before {
+ content: "\f0a3";
+}
+.fa-hand-o-right:before {
+ content: "\f0a4";
+}
+.fa-hand-o-left:before {
+ content: "\f0a5";
+}
+.fa-hand-o-up:before {
+ content: "\f0a6";
+}
+.fa-hand-o-down:before {
+ content: "\f0a7";
+}
+.fa-arrow-circle-left:before {
+ content: "\f0a8";
+}
+.fa-arrow-circle-right:before {
+ content: "\f0a9";
+}
+.fa-arrow-circle-up:before {
+ content: "\f0aa";
+}
+.fa-arrow-circle-down:before {
+ content: "\f0ab";
+}
+.fa-globe:before {
+ content: "\f0ac";
+}
+.fa-wrench:before {
+ content: "\f0ad";
+}
+.fa-tasks:before {
+ content: "\f0ae";
+}
+.fa-filter:before {
+ content: "\f0b0";
+}
+.fa-briefcase:before {
+ content: "\f0b1";
+}
+.fa-arrows-alt:before {
+ content: "\f0b2";
+}
+.fa-group:before,
+.fa-users:before {
+ content: "\f0c0";
+}
+.fa-chain:before,
+.fa-link:before {
+ content: "\f0c1";
+}
+.fa-cloud:before {
+ content: "\f0c2";
+}
+.fa-flask:before {
+ content: "\f0c3";
+}
+.fa-cut:before,
+.fa-scissors:before {
+ content: "\f0c4";
+}
+.fa-copy:before,
+.fa-files-o:before {
+ content: "\f0c5";
+}
+.fa-paperclip:before {
+ content: "\f0c6";
+}
+.fa-save:before,
+.fa-floppy-o:before {
+ content: "\f0c7";
+}
+.fa-square:before {
+ content: "\f0c8";
+}
+.fa-navicon:before,
+.fa-reorder:before,
+.fa-bars:before {
+ content: "\f0c9";
+}
+.fa-list-ul:before {
+ content: "\f0ca";
+}
+.fa-list-ol:before {
+ content: "\f0cb";
+}
+.fa-strikethrough:before {
+ content: "\f0cc";
+}
+.fa-underline:before {
+ content: "\f0cd";
+}
+.fa-table:before {
+ content: "\f0ce";
+}
+.fa-magic:before {
+ content: "\f0d0";
+}
+.fa-truck:before {
+ content: "\f0d1";
+}
+.fa-pinterest:before {
+ content: "\f0d2";
+}
+.fa-pinterest-square:before {
+ content: "\f0d3";
+}
+.fa-google-plus-square:before {
+ content: "\f0d4";
+}
+.fa-google-plus:before {
+ content: "\f0d5";
+}
+.fa-money:before {
+ content: "\f0d6";
+}
+.fa-caret-down:before {
+ content: "\f0d7";
+}
+.fa-caret-up:before {
+ content: "\f0d8";
+}
+.fa-caret-left:before {
+ content: "\f0d9";
+}
+.fa-caret-right:before {
+ content: "\f0da";
+}
+.fa-columns:before {
+ content: "\f0db";
+}
+.fa-unsorted:before,
+.fa-sort:before {
+ content: "\f0dc";
+}
+.fa-sort-down:before,
+.fa-sort-desc:before {
+ content: "\f0dd";
+}
+.fa-sort-up:before,
+.fa-sort-asc:before {
+ content: "\f0de";
+}
+.fa-envelope:before {
+ content: "\f0e0";
+}
+.fa-linkedin:before {
+ content: "\f0e1";
+}
+.fa-rotate-left:before,
+.fa-undo:before {
+ content: "\f0e2";
+}
+.fa-legal:before,
+.fa-gavel:before {
+ content: "\f0e3";
+}
+.fa-dashboard:before,
+.fa-tachometer:before {
+ content: "\f0e4";
+}
+.fa-comment-o:before {
+ content: "\f0e5";
+}
+.fa-comments-o:before {
+ content: "\f0e6";
+}
+.fa-flash:before,
+.fa-bolt:before {
+ content: "\f0e7";
+}
+.fa-sitemap:before {
+ content: "\f0e8";
+}
+.fa-umbrella:before {
+ content: "\f0e9";
+}
+.fa-paste:before,
+.fa-clipboard:before {
+ content: "\f0ea";
+}
+.fa-lightbulb-o:before {
+ content: "\f0eb";
+}
+.fa-exchange:before {
+ content: "\f0ec";
+}
+.fa-cloud-download:before {
+ content: "\f0ed";
+}
+.fa-cloud-upload:before {
+ content: "\f0ee";
+}
+.fa-user-md:before {
+ content: "\f0f0";
+}
+.fa-stethoscope:before {
+ content: "\f0f1";
+}
+.fa-suitcase:before {
+ content: "\f0f2";
+}
+.fa-bell-o:before {
+ content: "\f0a2";
+}
+.fa-coffee:before {
+ content: "\f0f4";
+}
+.fa-cutlery:before {
+ content: "\f0f5";
+}
+.fa-file-text-o:before {
+ content: "\f0f6";
+}
+.fa-building-o:before {
+ content: "\f0f7";
+}
+.fa-hospital-o:before {
+ content: "\f0f8";
+}
+.fa-ambulance:before {
+ content: "\f0f9";
+}
+.fa-medkit:before {
+ content: "\f0fa";
+}
+.fa-fighter-jet:before {
+ content: "\f0fb";
+}
+.fa-beer:before {
+ content: "\f0fc";
+}
+.fa-h-square:before {
+ content: "\f0fd";
+}
+.fa-plus-square:before {
+ content: "\f0fe";
+}
+.fa-angle-double-left:before {
+ content: "\f100";
+}
+.fa-angle-double-right:before {
+ content: "\f101";
+}
+.fa-angle-double-up:before {
+ content: "\f102";
+}
+.fa-angle-double-down:before {
+ content: "\f103";
+}
+.fa-angle-left:before {
+ content: "\f104";
+}
+.fa-angle-right:before {
+ content: "\f105";
+}
+.fa-angle-up:before {
+ content: "\f106";
+}
+.fa-angle-down:before {
+ content: "\f107";
+}
+.fa-desktop:before {
+ content: "\f108";
+}
+.fa-laptop:before {
+ content: "\f109";
+}
+.fa-tablet:before {
+ content: "\f10a";
+}
+.fa-mobile-phone:before,
+.fa-mobile:before {
+ content: "\f10b";
+}
+.fa-circle-o:before {
+ content: "\f10c";
+}
+.fa-quote-left:before {
+ content: "\f10d";
+}
+.fa-quote-right:before {
+ content: "\f10e";
+}
+.fa-spinner:before {
+ content: "\f110";
+}
+.fa-circle:before {
+ content: "\f111";
+}
+.fa-mail-reply:before,
+.fa-reply:before {
+ content: "\f112";
+}
+.fa-github-alt:before {
+ content: "\f113";
+}
+.fa-folder-o:before {
+ content: "\f114";
+}
+.fa-folder-open-o:before {
+ content: "\f115";
+}
+.fa-smile-o:before {
+ content: "\f118";
+}
+.fa-frown-o:before {
+ content: "\f119";
+}
+.fa-meh-o:before {
+ content: "\f11a";
+}
+.fa-gamepad:before {
+ content: "\f11b";
+}
+.fa-keyboard-o:before {
+ content: "\f11c";
+}
+.fa-flag-o:before {
+ content: "\f11d";
+}
+.fa-flag-checkered:before {
+ content: "\f11e";
+}
+.fa-terminal:before {
+ content: "\f120";
+}
+.fa-code:before {
+ content: "\f121";
+}
+.fa-mail-reply-all:before,
+.fa-reply-all:before {
+ content: "\f122";
+}
+.fa-star-half-empty:before,
+.fa-star-half-full:before,
+.fa-star-half-o:before {
+ content: "\f123";
+}
+.fa-location-arrow:before {
+ content: "\f124";
+}
+.fa-crop:before {
+ content: "\f125";
+}
+.fa-code-fork:before {
+ content: "\f126";
+}
+.fa-unlink:before,
+.fa-chain-broken:before {
+ content: "\f127";
+}
+.fa-question:before {
+ content: "\f128";
+}
+.fa-info:before {
+ content: "\f129";
+}
+.fa-exclamation:before {
+ content: "\f12a";
+}
+.fa-superscript:before {
+ content: "\f12b";
+}
+.fa-subscript:before {
+ content: "\f12c";
+}
+.fa-eraser:before {
+ content: "\f12d";
+}
+.fa-puzzle-piece:before {
+ content: "\f12e";
+}
+.fa-microphone:before {
+ content: "\f130";
+}
+.fa-microphone-slash:before {
+ content: "\f131";
+}
+.fa-shield:before {
+ content: "\f132";
+}
+.fa-calendar-o:before {
+ content: "\f133";
+}
+.fa-fire-extinguisher:before {
+ content: "\f134";
+}
+.fa-rocket:before {
+ content: "\f135";
+}
+.fa-maxcdn:before {
+ content: "\f136";
+}
+.fa-chevron-circle-left:before {
+ content: "\f137";
+}
+.fa-chevron-circle-right:before {
+ content: "\f138";
+}
+.fa-chevron-circle-up:before {
+ content: "\f139";
+}
+.fa-chevron-circle-down:before {
+ content: "\f13a";
+}
+.fa-html5:before {
+ content: "\f13b";
+}
+.fa-css3:before {
+ content: "\f13c";
+}
+.fa-anchor:before {
+ content: "\f13d";
+}
+.fa-unlock-alt:before {
+ content: "\f13e";
+}
+.fa-bullseye:before {
+ content: "\f140";
+}
+.fa-ellipsis-h:before {
+ content: "\f141";
+}
+.fa-ellipsis-v:before {
+ content: "\f142";
+}
+.fa-rss-square:before {
+ content: "\f143";
+}
+.fa-play-circle:before {
+ content: "\f144";
+}
+.fa-ticket:before {
+ content: "\f145";
+}
+.fa-minus-square:before {
+ content: "\f146";
+}
+.fa-minus-square-o:before {
+ content: "\f147";
+}
+.fa-level-up:before {
+ content: "\f148";
+}
+.fa-level-down:before {
+ content: "\f149";
+}
+.fa-check-square:before {
+ content: "\f14a";
+}
+.fa-pencil-square:before {
+ content: "\f14b";
+}
+.fa-external-link-square:before {
+ content: "\f14c";
+}
+.fa-share-square:before {
+ content: "\f14d";
+}
+.fa-compass:before {
+ content: "\f14e";
+}
+.fa-toggle-down:before,
+.fa-caret-square-o-down:before {
+ content: "\f150";
+}
+.fa-toggle-up:before,
+.fa-caret-square-o-up:before {
+ content: "\f151";
+}
+.fa-toggle-right:before,
+.fa-caret-square-o-right:before {
+ content: "\f152";
+}
+.fa-euro:before,
+.fa-eur:before {
+ content: "\f153";
+}
+.fa-gbp:before {
+ content: "\f154";
+}
+.fa-dollar:before,
+.fa-usd:before {
+ content: "\f155";
+}
+.fa-rupee:before,
+.fa-inr:before {
+ content: "\f156";
+}
+.fa-cny:before,
+.fa-rmb:before,
+.fa-yen:before,
+.fa-jpy:before {
+ content: "\f157";
+}
+.fa-ruble:before,
+.fa-rouble:before,
+.fa-rub:before {
+ content: "\f158";
+}
+.fa-won:before,
+.fa-krw:before {
+ content: "\f159";
+}
+.fa-bitcoin:before,
+.fa-btc:before {
+ content: "\f15a";
+}
+.fa-file:before {
+ content: "\f15b";
+}
+.fa-file-text:before {
+ content: "\f15c";
+}
+.fa-sort-alpha-asc:before {
+ content: "\f15d";
+}
+.fa-sort-alpha-desc:before {
+ content: "\f15e";
+}
+.fa-sort-amount-asc:before {
+ content: "\f160";
+}
+.fa-sort-amount-desc:before {
+ content: "\f161";
+}
+.fa-sort-numeric-asc:before {
+ content: "\f162";
+}
+.fa-sort-numeric-desc:before {
+ content: "\f163";
+}
+.fa-thumbs-up:before {
+ content: "\f164";
+}
+.fa-thumbs-down:before {
+ content: "\f165";
+}
+.fa-youtube-square:before {
+ content: "\f166";
+}
+.fa-youtube:before {
+ content: "\f167";
+}
+.fa-xing:before {
+ content: "\f168";
+}
+.fa-xing-square:before {
+ content: "\f169";
+}
+.fa-youtube-play:before {
+ content: "\f16a";
+}
+.fa-dropbox:before {
+ content: "\f16b";
+}
+.fa-stack-overflow:before {
+ content: "\f16c";
+}
+.fa-instagram:before {
+ content: "\f16d";
+}
+.fa-flickr:before {
+ content: "\f16e";
+}
+.fa-adn:before {
+ content: "\f170";
+}
+.fa-bitbucket:before {
+ content: "\f171";
+}
+.fa-bitbucket-square:before {
+ content: "\f172";
+}
+.fa-tumblr:before {
+ content: "\f173";
+}
+.fa-tumblr-square:before {
+ content: "\f174";
+}
+.fa-long-arrow-down:before {
+ content: "\f175";
+}
+.fa-long-arrow-up:before {
+ content: "\f176";
+}
+.fa-long-arrow-left:before {
+ content: "\f177";
+}
+.fa-long-arrow-right:before {
+ content: "\f178";
+}
+.fa-apple:before {
+ content: "\f179";
+}
+.fa-windows:before {
+ content: "\f17a";
+}
+.fa-android:before {
+ content: "\f17b";
+}
+.fa-linux:before {
+ content: "\f17c";
+}
+.fa-dribbble:before {
+ content: "\f17d";
+}
+.fa-skype:before {
+ content: "\f17e";
+}
+.fa-foursquare:before {
+ content: "\f180";
+}
+.fa-trello:before {
+ content: "\f181";
+}
+.fa-female:before {
+ content: "\f182";
+}
+.fa-male:before {
+ content: "\f183";
+}
+.fa-gittip:before,
+.fa-gratipay:before {
+ content: "\f184";
+}
+.fa-sun-o:before {
+ content: "\f185";
+}
+.fa-moon-o:before {
+ content: "\f186";
+}
+.fa-archive:before {
+ content: "\f187";
+}
+.fa-bug:before {
+ content: "\f188";
+}
+.fa-vk:before {
+ content: "\f189";
+}
+.fa-weibo:before {
+ content: "\f18a";
+}
+.fa-renren:before {
+ content: "\f18b";
+}
+.fa-pagelines:before {
+ content: "\f18c";
+}
+.fa-stack-exchange:before {
+ content: "\f18d";
+}
+.fa-arrow-circle-o-right:before {
+ content: "\f18e";
+}
+.fa-arrow-circle-o-left:before {
+ content: "\f190";
+}
+.fa-toggle-left:before,
+.fa-caret-square-o-left:before {
+ content: "\f191";
+}
+.fa-dot-circle-o:before {
+ content: "\f192";
+}
+.fa-wheelchair:before {
+ content: "\f193";
+}
+.fa-vimeo-square:before {
+ content: "\f194";
+}
+.fa-turkish-lira:before,
+.fa-try:before {
+ content: "\f195";
+}
+.fa-plus-square-o:before {
+ content: "\f196";
+}
+.fa-space-shuttle:before {
+ content: "\f197";
+}
+.fa-slack:before {
+ content: "\f198";
+}
+.fa-envelope-square:before {
+ content: "\f199";
+}
+.fa-wordpress:before {
+ content: "\f19a";
+}
+.fa-openid:before {
+ content: "\f19b";
+}
+.fa-institution:before,
+.fa-bank:before,
+.fa-university:before {
+ content: "\f19c";
+}
+.fa-mortar-board:before,
+.fa-graduation-cap:before {
+ content: "\f19d";
+}
+.fa-yahoo:before {
+ content: "\f19e";
+}
+.fa-google:before {
+ content: "\f1a0";
+}
+.fa-reddit:before {
+ content: "\f1a1";
+}
+.fa-reddit-square:before {
+ content: "\f1a2";
+}
+.fa-stumbleupon-circle:before {
+ content: "\f1a3";
+}
+.fa-stumbleupon:before {
+ content: "\f1a4";
+}
+.fa-delicious:before {
+ content: "\f1a5";
+}
+.fa-digg:before {
+ content: "\f1a6";
+}
+.fa-pied-piper:before {
+ content: "\f1a7";
+}
+.fa-pied-piper-alt:before {
+ content: "\f1a8";
+}
+.fa-drupal:before {
+ content: "\f1a9";
+}
+.fa-joomla:before {
+ content: "\f1aa";
+}
+.fa-language:before {
+ content: "\f1ab";
+}
+.fa-fax:before {
+ content: "\f1ac";
+}
+.fa-building:before {
+ content: "\f1ad";
+}
+.fa-child:before {
+ content: "\f1ae";
+}
+.fa-paw:before {
+ content: "\f1b0";
+}
+.fa-spoon:before {
+ content: "\f1b1";
+}
+.fa-cube:before {
+ content: "\f1b2";
+}
+.fa-cubes:before {
+ content: "\f1b3";
+}
+.fa-behance:before {
+ content: "\f1b4";
+}
+.fa-behance-square:before {
+ content: "\f1b5";
+}
+.fa-steam:before {
+ content: "\f1b6";
+}
+.fa-steam-square:before {
+ content: "\f1b7";
+}
+.fa-recycle:before {
+ content: "\f1b8";
+}
+.fa-automobile:before,
+.fa-car:before {
+ content: "\f1b9";
+}
+.fa-cab:before,
+.fa-taxi:before {
+ content: "\f1ba";
+}
+.fa-tree:before {
+ content: "\f1bb";
+}
+.fa-spotify:before {
+ content: "\f1bc";
+}
+.fa-deviantart:before {
+ content: "\f1bd";
+}
+.fa-soundcloud:before {
+ content: "\f1be";
+}
+.fa-database:before {
+ content: "\f1c0";
+}
+.fa-file-pdf-o:before {
+ content: "\f1c1";
+}
+.fa-file-word-o:before {
+ content: "\f1c2";
+}
+.fa-file-excel-o:before {
+ content: "\f1c3";
+}
+.fa-file-powerpoint-o:before {
+ content: "\f1c4";
+}
+.fa-file-photo-o:before,
+.fa-file-picture-o:before,
+.fa-file-image-o:before {
+ content: "\f1c5";
+}
+.fa-file-zip-o:before,
+.fa-file-archive-o:before {
+ content: "\f1c6";
+}
+.fa-file-sound-o:before,
+.fa-file-audio-o:before {
+ content: "\f1c7";
+}
+.fa-file-movie-o:before,
+.fa-file-video-o:before {
+ content: "\f1c8";
+}
+.fa-file-code-o:before {
+ content: "\f1c9";
+}
+.fa-vine:before {
+ content: "\f1ca";
+}
+.fa-codepen:before {
+ content: "\f1cb";
+}
+.fa-jsfiddle:before {
+ content: "\f1cc";
+}
+.fa-life-bouy:before,
+.fa-life-buoy:before,
+.fa-life-saver:before,
+.fa-support:before,
+.fa-life-ring:before {
+ content: "\f1cd";
+}
+.fa-circle-o-notch:before {
+ content: "\f1ce";
+}
+.fa-ra:before,
+.fa-rebel:before {
+ content: "\f1d0";
+}
+.fa-ge:before,
+.fa-empire:before {
+ content: "\f1d1";
+}
+.fa-git-square:before {
+ content: "\f1d2";
+}
+.fa-git:before {
+ content: "\f1d3";
+}
+.fa-y-combinator-square:before,
+.fa-yc-square:before,
+.fa-hacker-news:before {
+ content: "\f1d4";
+}
+.fa-tencent-weibo:before {
+ content: "\f1d5";
+}
+.fa-qq:before {
+ content: "\f1d6";
+}
+.fa-wechat:before,
+.fa-weixin:before {
+ content: "\f1d7";
+}
+.fa-send:before,
+.fa-paper-plane:before {
+ content: "\f1d8";
+}
+.fa-send-o:before,
+.fa-paper-plane-o:before {
+ content: "\f1d9";
+}
+.fa-history:before {
+ content: "\f1da";
+}
+.fa-circle-thin:before {
+ content: "\f1db";
+}
+.fa-header:before {
+ content: "\f1dc";
+}
+.fa-paragraph:before {
+ content: "\f1dd";
+}
+.fa-sliders:before {
+ content: "\f1de";
+}
+.fa-share-alt:before {
+ content: "\f1e0";
+}
+.fa-share-alt-square:before {
+ content: "\f1e1";
+}
+.fa-bomb:before {
+ content: "\f1e2";
+}
+.fa-soccer-ball-o:before,
+.fa-futbol-o:before {
+ content: "\f1e3";
+}
+.fa-tty:before {
+ content: "\f1e4";
+}
+.fa-binoculars:before {
+ content: "\f1e5";
+}
+.fa-plug:before {
+ content: "\f1e6";
+}
+.fa-slideshare:before {
+ content: "\f1e7";
+}
+.fa-twitch:before {
+ content: "\f1e8";
+}
+.fa-yelp:before {
+ content: "\f1e9";
+}
+.fa-newspaper-o:before {
+ content: "\f1ea";
+}
+.fa-wifi:before {
+ content: "\f1eb";
+}
+.fa-calculator:before {
+ content: "\f1ec";
+}
+.fa-paypal:before {
+ content: "\f1ed";
+}
+.fa-google-wallet:before {
+ content: "\f1ee";
+}
+.fa-cc-visa:before {
+ content: "\f1f0";
+}
+.fa-cc-mastercard:before {
+ content: "\f1f1";
+}
+.fa-cc-discover:before {
+ content: "\f1f2";
+}
+.fa-cc-amex:before {
+ content: "\f1f3";
+}
+.fa-cc-paypal:before {
+ content: "\f1f4";
+}
+.fa-cc-stripe:before {
+ content: "\f1f5";
+}
+.fa-bell-slash:before {
+ content: "\f1f6";
+}
+.fa-bell-slash-o:before {
+ content: "\f1f7";
+}
+.fa-trash:before {
+ content: "\f1f8";
+}
+.fa-copyright:before {
+ content: "\f1f9";
+}
+.fa-at:before {
+ content: "\f1fa";
+}
+.fa-eyedropper:before {
+ content: "\f1fb";
+}
+.fa-paint-brush:before {
+ content: "\f1fc";
+}
+.fa-birthday-cake:before {
+ content: "\f1fd";
+}
+.fa-area-chart:before {
+ content: "\f1fe";
+}
+.fa-pie-chart:before {
+ content: "\f200";
+}
+.fa-line-chart:before {
+ content: "\f201";
+}
+.fa-lastfm:before {
+ content: "\f202";
+}
+.fa-lastfm-square:before {
+ content: "\f203";
+}
+.fa-toggle-off:before {
+ content: "\f204";
+}
+.fa-toggle-on:before {
+ content: "\f205";
+}
+.fa-bicycle:before {
+ content: "\f206";
+}
+.fa-bus:before {
+ content: "\f207";
+}
+.fa-ioxhost:before {
+ content: "\f208";
+}
+.fa-angellist:before {
+ content: "\f209";
+}
+.fa-cc:before {
+ content: "\f20a";
+}
+.fa-shekel:before,
+.fa-sheqel:before,
+.fa-ils:before {
+ content: "\f20b";
+}
+.fa-meanpath:before {
+ content: "\f20c";
+}
+.fa-buysellads:before {
+ content: "\f20d";
+}
+.fa-connectdevelop:before {
+ content: "\f20e";
+}
+.fa-dashcube:before {
+ content: "\f210";
+}
+.fa-forumbee:before {
+ content: "\f211";
+}
+.fa-leanpub:before {
+ content: "\f212";
+}
+.fa-sellsy:before {
+ content: "\f213";
+}
+.fa-shirtsinbulk:before {
+ content: "\f214";
+}
+.fa-simplybuilt:before {
+ content: "\f215";
+}
+.fa-skyatlas:before {
+ content: "\f216";
+}
+.fa-cart-plus:before {
+ content: "\f217";
+}
+.fa-cart-arrow-down:before {
+ content: "\f218";
+}
+.fa-diamond:before {
+ content: "\f219";
+}
+.fa-ship:before {
+ content: "\f21a";
+}
+.fa-user-secret:before {
+ content: "\f21b";
+}
+.fa-motorcycle:before {
+ content: "\f21c";
+}
+.fa-street-view:before {
+ content: "\f21d";
+}
+.fa-heartbeat:before {
+ content: "\f21e";
+}
+.fa-venus:before {
+ content: "\f221";
+}
+.fa-mars:before {
+ content: "\f222";
+}
+.fa-mercury:before {
+ content: "\f223";
+}
+.fa-intersex:before,
+.fa-transgender:before {
+ content: "\f224";
+}
+.fa-transgender-alt:before {
+ content: "\f225";
+}
+.fa-venus-double:before {
+ content: "\f226";
+}
+.fa-mars-double:before {
+ content: "\f227";
+}
+.fa-venus-mars:before {
+ content: "\f228";
+}
+.fa-mars-stroke:before {
+ content: "\f229";
+}
+.fa-mars-stroke-v:before {
+ content: "\f22a";
+}
+.fa-mars-stroke-h:before {
+ content: "\f22b";
+}
+.fa-neuter:before {
+ content: "\f22c";
+}
+.fa-genderless:before {
+ content: "\f22d";
+}
+.fa-facebook-official:before {
+ content: "\f230";
+}
+.fa-pinterest-p:before {
+ content: "\f231";
+}
+.fa-whatsapp:before {
+ content: "\f232";
+}
+.fa-server:before {
+ content: "\f233";
+}
+.fa-user-plus:before {
+ content: "\f234";
+}
+.fa-user-times:before {
+ content: "\f235";
+}
+.fa-hotel:before,
+.fa-bed:before {
+ content: "\f236";
+}
+.fa-viacoin:before {
+ content: "\f237";
+}
+.fa-train:before {
+ content: "\f238";
+}
+.fa-subway:before {
+ content: "\f239";
+}
+.fa-medium:before {
+ content: "\f23a";
+}
+.fa-yc:before,
+.fa-y-combinator:before {
+ content: "\f23b";
+}
+.fa-optin-monster:before {
+ content: "\f23c";
+}
+.fa-opencart:before {
+ content: "\f23d";
+}
+.fa-expeditedssl:before {
+ content: "\f23e";
+}
+.fa-battery-4:before,
+.fa-battery-full:before {
+ content: "\f240";
+}
+.fa-battery-3:before,
+.fa-battery-three-quarters:before {
+ content: "\f241";
+}
+.fa-battery-2:before,
+.fa-battery-half:before {
+ content: "\f242";
+}
+.fa-battery-1:before,
+.fa-battery-quarter:before {
+ content: "\f243";
+}
+.fa-battery-0:before,
+.fa-battery-empty:before {
+ content: "\f244";
+}
+.fa-mouse-pointer:before {
+ content: "\f245";
+}
+.fa-i-cursor:before {
+ content: "\f246";
+}
+.fa-object-group:before {
+ content: "\f247";
+}
+.fa-object-ungroup:before {
+ content: "\f248";
+}
+.fa-sticky-note:before {
+ content: "\f249";
+}
+.fa-sticky-note-o:before {
+ content: "\f24a";
+}
+.fa-cc-jcb:before {
+ content: "\f24b";
+}
+.fa-cc-diners-club:before {
+ content: "\f24c";
+}
+.fa-clone:before {
+ content: "\f24d";
+}
+.fa-balance-scale:before {
+ content: "\f24e";
+}
+.fa-hourglass-o:before {
+ content: "\f250";
+}
+.fa-hourglass-1:before,
+.fa-hourglass-start:before {
+ content: "\f251";
+}
+.fa-hourglass-2:before,
+.fa-hourglass-half:before {
+ content: "\f252";
+}
+.fa-hourglass-3:before,
+.fa-hourglass-end:before {
+ content: "\f253";
+}
+.fa-hourglass:before {
+ content: "\f254";
+}
+.fa-hand-grab-o:before,
+.fa-hand-rock-o:before {
+ content: "\f255";
+}
+.fa-hand-stop-o:before,
+.fa-hand-paper-o:before {
+ content: "\f256";
+}
+.fa-hand-scissors-o:before {
+ content: "\f257";
+}
+.fa-hand-lizard-o:before {
+ content: "\f258";
+}
+.fa-hand-spock-o:before {
+ content: "\f259";
+}
+.fa-hand-pointer-o:before {
+ content: "\f25a";
+}
+.fa-hand-peace-o:before {
+ content: "\f25b";
+}
+.fa-trademark:before {
+ content: "\f25c";
+}
+.fa-registered:before {
+ content: "\f25d";
+}
+.fa-creative-commons:before {
+ content: "\f25e";
+}
+.fa-gg:before {
+ content: "\f260";
+}
+.fa-gg-circle:before {
+ content: "\f261";
+}
+.fa-tripadvisor:before {
+ content: "\f262";
+}
+.fa-odnoklassniki:before {
+ content: "\f263";
+}
+.fa-odnoklassniki-square:before {
+ content: "\f264";
+}
+.fa-get-pocket:before {
+ content: "\f265";
+}
+.fa-wikipedia-w:before {
+ content: "\f266";
+}
+.fa-safari:before {
+ content: "\f267";
+}
+.fa-chrome:before {
+ content: "\f268";
+}
+.fa-firefox:before {
+ content: "\f269";
+}
+.fa-opera:before {
+ content: "\f26a";
+}
+.fa-internet-explorer:before {
+ content: "\f26b";
+}
+.fa-tv:before,
+.fa-television:before {
+ content: "\f26c";
+}
+.fa-contao:before {
+ content: "\f26d";
+}
+.fa-500px:before {
+ content: "\f26e";
+}
+.fa-amazon:before {
+ content: "\f270";
+}
+.fa-calendar-plus-o:before {
+ content: "\f271";
+}
+.fa-calendar-minus-o:before {
+ content: "\f272";
+}
+.fa-calendar-times-o:before {
+ content: "\f273";
+}
+.fa-calendar-check-o:before {
+ content: "\f274";
+}
+.fa-industry:before {
+ content: "\f275";
+}
+.fa-map-pin:before {
+ content: "\f276";
+}
+.fa-map-signs:before {
+ content: "\f277";
+}
+.fa-map-o:before {
+ content: "\f278";
+}
+.fa-map:before {
+ content: "\f279";
+}
+.fa-commenting:before {
+ content: "\f27a";
+}
+.fa-commenting-o:before {
+ content: "\f27b";
+}
+.fa-houzz:before {
+ content: "\f27c";
+}
+.fa-vimeo:before {
+ content: "\f27d";
+}
+.fa-black-tie:before {
+ content: "\f27e";
+}
+.fa-fonticons:before {
+ content: "\f280";
+}
+.fa-reddit-alien:before {
+ content: "\f281";
+}
+.fa-edge:before {
+ content: "\f282";
+}
+.fa-credit-card-alt:before {
+ content: "\f283";
+}
+.fa-codiepie:before {
+ content: "\f284";
+}
+.fa-modx:before {
+ content: "\f285";
+}
+.fa-fort-awesome:before {
+ content: "\f286";
+}
+.fa-usb:before {
+ content: "\f287";
+}
+.fa-product-hunt:before {
+ content: "\f288";
+}
+.fa-mixcloud:before {
+ content: "\f289";
+}
+.fa-scribd:before {
+ content: "\f28a";
+}
+.fa-pause-circle:before {
+ content: "\f28b";
+}
+.fa-pause-circle-o:before {
+ content: "\f28c";
+}
+.fa-stop-circle:before {
+ content: "\f28d";
+}
+.fa-stop-circle-o:before {
+ content: "\f28e";
+}
+.fa-shopping-bag:before {
+ content: "\f290";
+}
+.fa-shopping-basket:before {
+ content: "\f291";
+}
+.fa-hashtag:before {
+ content: "\f292";
+}
+.fa-bluetooth:before {
+ content: "\f293";
+}
+.fa-bluetooth-b:before {
+ content: "\f294";
+}
+.fa-percent:before {
+ content: "\f295";
+}
diff --git a/asset/static/fonts/css/font-awesome.min.css b/asset/static/fonts/css/font-awesome.min.css
new file mode 100755
index 0000000..d0603cb
--- /dev/null
+++ b/asset/static/fonts/css/font-awesome.min.css
@@ -0,0 +1,4 @@
+/*!
+ * Font Awesome 4.5.0 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.5.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.5.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.5.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.5.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.5.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.5.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-intersex:before,.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-genderless:before{content:"\f22d"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"}.fa-yc:before,.fa-y-combinator:before{content:"\f23b"}.fa-optin-monster:before{content:"\f23c"}.fa-opencart:before{content:"\f23d"}.fa-expeditedssl:before{content:"\f23e"}.fa-battery-4:before,.fa-battery-full:before{content:"\f240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-battery-2:before,.fa-battery-half:before{content:"\f242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-mouse-pointer:before{content:"\f245"}.fa-i-cursor:before{content:"\f246"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-sticky-note:before{content:"\f249"}.fa-sticky-note-o:before{content:"\f24a"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-diners-club:before{content:"\f24c"}.fa-clone:before{content:"\f24d"}.fa-balance-scale:before{content:"\f24e"}.fa-hourglass-o:before{content:"\f250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-hourglass:before{content:"\f254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\f255"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:"\f256"}.fa-hand-scissors-o:before{content:"\f257"}.fa-hand-lizard-o:before{content:"\f258"}.fa-hand-spock-o:before{content:"\f259"}.fa-hand-pointer-o:before{content:"\f25a"}.fa-hand-peace-o:before{content:"\f25b"}.fa-trademark:before{content:"\f25c"}.fa-registered:before{content:"\f25d"}.fa-creative-commons:before{content:"\f25e"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-tripadvisor:before{content:"\f262"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-get-pocket:before{content:"\f265"}.fa-wikipedia-w:before{content:"\f266"}.fa-safari:before{content:"\f267"}.fa-chrome:before{content:"\f268"}.fa-firefox:before{content:"\f269"}.fa-opera:before{content:"\f26a"}.fa-internet-explorer:before{content:"\f26b"}.fa-tv:before,.fa-television:before{content:"\f26c"}.fa-contao:before{content:"\f26d"}.fa-500px:before{content:"\f26e"}.fa-amazon:before{content:"\f270"}.fa-calendar-plus-o:before{content:"\f271"}.fa-calendar-minus-o:before{content:"\f272"}.fa-calendar-times-o:before{content:"\f273"}.fa-calendar-check-o:before{content:"\f274"}.fa-industry:before{content:"\f275"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-map-o:before{content:"\f278"}.fa-map:before{content:"\f279"}.fa-commenting:before{content:"\f27a"}.fa-commenting-o:before{content:"\f27b"}.fa-houzz:before{content:"\f27c"}.fa-vimeo:before{content:"\f27d"}.fa-black-tie:before{content:"\f27e"}.fa-fonticons:before{content:"\f280"}.fa-reddit-alien:before{content:"\f281"}.fa-edge:before{content:"\f282"}.fa-credit-card-alt:before{content:"\f283"}.fa-codiepie:before{content:"\f284"}.fa-modx:before{content:"\f285"}.fa-fort-awesome:before{content:"\f286"}.fa-usb:before{content:"\f287"}.fa-product-hunt:before{content:"\f288"}.fa-mixcloud:before{content:"\f289"}.fa-scribd:before{content:"\f28a"}.fa-pause-circle:before{content:"\f28b"}.fa-pause-circle-o:before{content:"\f28c"}.fa-stop-circle:before{content:"\f28d"}.fa-stop-circle-o:before{content:"\f28e"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-hashtag:before{content:"\f292"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-percent:before{content:"\f295"}
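
Every rule in this sheet maps an icon class onto a codepoint in the Unicode Private Use Area, so markup only ever pairs the base `.fa` class with one icon class. A minimal sketch of extending the same pattern with a project-local alias (the `.wiki-tag` name is hypothetical, not part of Font Awesome; the element still needs the base `.fa` class so the FontAwesome face applies):

    /* Hypothetical wiki-specific alias reusing the hashtag glyph (U+F292). */
    .wiki-tag:before { content: "\f292"; }
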
diff --git a/asset/static/fonts/fonts/FontAwesome.otf b/asset/static/fonts/fonts/FontAwesome.otf
new file mode 100755
index 0000000..3ed7f8b
Binary files /dev/null and b/asset/static/fonts/fonts/FontAwesome.otf differ
diff --git a/asset/static/fonts/fonts/fontawesome-webfont.eot b/asset/static/fonts/fonts/fontawesome-webfont.eot
new file mode 100755
index 0000000..9b6afae
Binary files /dev/null and b/asset/static/fonts/fonts/fontawesome-webfont.eot differ
diff --git a/asset/static/fonts/fonts/fontawesome-webfont.svg b/asset/static/fonts/fonts/fontawesome-webfont.svg
new file mode 100755
index 0000000..d05688e
--- /dev/null
+++ b/asset/static/fonts/fonts/fontawesome-webfont.svg
@@ -0,0 +1,655 @@
diff --git a/asset/static/fonts/fonts/fontawesome-webfont.ttf b/asset/static/fonts/fonts/fontawesome-webfont.ttf
new file mode 100755
index 0000000..26dea79
Binary files /dev/null and b/asset/static/fonts/fonts/fontawesome-webfont.ttf differ
diff --git a/asset/static/fonts/fonts/fontawesome-webfont.woff b/asset/static/fonts/fonts/fontawesome-webfont.woff
new file mode 100755
index 0000000..dc35ce3
Binary files /dev/null and b/asset/static/fonts/fonts/fontawesome-webfont.woff differ
diff --git a/asset/static/fonts/fonts/fontawesome-webfont.woff2 b/asset/static/fonts/fonts/fontawesome-webfont.woff2
new file mode 100755
index 0000000..500e517
Binary files /dev/null and b/asset/static/fonts/fonts/fontawesome-webfont.woff2 differ
diff --git a/asset/static/fonts/glyphicons-halflings-regular.2svg b/asset/static/fonts/glyphicons-halflings-regular.2svg
new file mode 100755
index 0000000..94fb549
--- /dev/null
+++ b/asset/static/fonts/glyphicons-halflings-regular.2svg
@@ -0,0 +1,288 @@
diff --git a/asset/static/fonts/glyphicons-halflings-regular.eot b/asset/static/fonts/glyphicons-halflings-regular.eot
new file mode 100755
index 0000000..b93a495
Binary files /dev/null and b/asset/static/fonts/glyphicons-halflings-regular.eot differ
diff --git a/asset/static/fonts/glyphicons-halflings-regular.svg b/asset/static/fonts/glyphicons-halflings-regular.svg
new file mode 100755
index 0000000..94fb549
--- /dev/null
+++ b/asset/static/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,288 @@
diff --git a/asset/static/fonts/glyphicons-halflings-regular.ttf b/asset/static/fonts/glyphicons-halflings-regular.ttf
new file mode 100755
index 0000000..1413fc6
Binary files /dev/null and b/asset/static/fonts/glyphicons-halflings-regular.ttf differ
diff --git a/asset/static/fonts/glyphicons-halflings-regular.woff b/asset/static/fonts/glyphicons-halflings-regular.woff
new file mode 100755
index 0000000..9e61285
Binary files /dev/null and b/asset/static/fonts/glyphicons-halflings-regular.woff differ
diff --git a/asset/static/fonts/glyphicons-halflings-regular.woff2 b/asset/static/fonts/glyphicons-halflings-regular.woff2
new file mode 100755
index 0000000..64539b5
Binary files /dev/null and b/asset/static/fonts/glyphicons-halflings-regular.woff2 differ
diff --git a/asset/static/fonts/less/animated.less b/asset/static/fonts/less/animated.less
new file mode 100755
index 0000000..66ad52a
--- /dev/null
+++ b/asset/static/fonts/less/animated.less
@@ -0,0 +1,34 @@
+// Animated Icons
+// --------------------------
+
+.@{fa-css-prefix}-spin {
+ -webkit-animation: fa-spin 2s infinite linear;
+ animation: fa-spin 2s infinite linear;
+}
+
+.@{fa-css-prefix}-pulse {
+ -webkit-animation: fa-spin 1s infinite steps(8);
+ animation: fa-spin 1s infinite steps(8);
+}
+
+@-webkit-keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
+
+@keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
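
Both helpers share the single `fa-spin` keyframe set: `.fa-spin` plays it as a smooth 2s rotation, while `.fa-pulse` plays the same frames in eight discrete steps over 1s. A sketch of a third variant built the same way (the `pulse-slow` class name is an assumption, not an upstream Font Awesome class):

    // Hypothetical slower stepped spinner reusing the fa-spin keyframes above.
    .@{fa-css-prefix}-pulse-slow {
      -webkit-animation: fa-spin 2s infinite steps(8);
      animation: fa-spin 2s infinite steps(8);
    }
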
diff --git a/asset/static/fonts/less/bordered-pulled.less b/asset/static/fonts/less/bordered-pulled.less
new file mode 100755
index 0000000..f1c8ad7
--- /dev/null
+++ b/asset/static/fonts/less/bordered-pulled.less
@@ -0,0 +1,25 @@
+// Bordered & Pulled
+// -------------------------
+
+.@{fa-css-prefix}-border {
+ padding: .2em .25em .15em;
+ border: solid .08em @fa-border-color;
+ border-radius: .1em;
+}
+
+.@{fa-css-prefix}-pull-left { float: left; }
+.@{fa-css-prefix}-pull-right { float: right; }
+
+.@{fa-css-prefix} {
+ &.@{fa-css-prefix}-pull-left { margin-right: .3em; }
+ &.@{fa-css-prefix}-pull-right { margin-left: .3em; }
+}
+
+/* Deprecated as of 4.4.0 */
+.pull-right { float: right; }
+.pull-left { float: left; }
+
+.@{fa-css-prefix} {
+ &.pull-left { margin-right: .3em; }
+ &.pull-right { margin-left: .3em; }
+}
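
For reference, with the default `@fa-css-prefix` of `fa`, the nested `&` rules above compile to the compound selectors seen in the minified build earlier in this diff:

    .fa.fa-pull-left  { margin-right: .3em; }
    .fa.fa-pull-right { margin-left: .3em; }

The bare `.pull-left`/`.pull-right` rules remain only for markup written against versions before 4.4.0.
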
diff --git a/asset/static/fonts/less/core.less b/asset/static/fonts/less/core.less
new file mode 100755
index 0000000..c577ac8
--- /dev/null
+++ b/asset/static/fonts/less/core.less
@@ -0,0 +1,12 @@
+// Base Class Definition
+// -------------------------
+
+.@{fa-css-prefix} {
+ display: inline-block;
+ font: normal normal normal @fa-font-size-base/@fa-line-height-base FontAwesome; // shortening font declaration
+ font-size: inherit; // can't have font-size inherit on line above, so need to override
+ text-rendering: auto; // optimizelegibility throws things off #1094
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+
+}
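
The `font` shorthand packs style, variant, weight, size/line-height, and family into one declaration; with the default variables it compiles to `font: normal normal normal 14px/1 FontAwesome` (visible in the minified build above), i.e. the longhand equivalent shown in this illustrative sketch (the selector name is hypothetical):

    // Longhand expansion of the shorthand, for clarity only.
    .fa-longhand-sketch {
      font-style: normal;
      font-variant: normal;
      font-weight: normal;
      font-size: 14px;      // @fa-font-size-base
      line-height: 1;       // @fa-line-height-base
      font-family: FontAwesome;
    }
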
diff --git a/asset/static/fonts/less/fixed-width.less b/asset/static/fonts/less/fixed-width.less
new file mode 100755
index 0000000..110289f
--- /dev/null
+++ b/asset/static/fonts/less/fixed-width.less
@@ -0,0 +1,6 @@
+// Fixed Width Icons
+// -------------------------
+.@{fa-css-prefix}-fw {
+ width: (18em / 14);
+ text-align: center;
+}
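
The parenthesized expression is LESS arithmetic evaluated at compile time; the emitted CSS matches the minified build:

    .fa-fw { width: 1.28571429em; text-align: center; }  /* 18em / 14 */
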
diff --git a/asset/static/fonts/less/font-awesome.less b/asset/static/fonts/less/font-awesome.less
new file mode 100755
index 0000000..c35d3ee
--- /dev/null
+++ b/asset/static/fonts/less/font-awesome.less
@@ -0,0 +1,17 @@
+/*!
+ * Font Awesome 4.5.0 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+
+@import "variables.less";
+@import "mixins.less";
+@import "path.less";
+@import "core.less";
+@import "larger.less";
+@import "fixed-width.less";
+@import "list.less";
+@import "bordered-pulled.less";
+@import "animated.less";
+@import "rotated-flipped.less";
+@import "stacked.less";
+@import "icons.less";
diff --git a/asset/static/fonts/less/icons.less b/asset/static/fonts/less/icons.less
new file mode 100755
index 0000000..ca60abd
--- /dev/null
+++ b/asset/static/fonts/less/icons.less
@@ -0,0 +1,697 @@
+/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
+ readers do not read off random characters that represent icons */
+
+.@{fa-css-prefix}-glass:before { content: @fa-var-glass; }
+.@{fa-css-prefix}-music:before { content: @fa-var-music; }
+.@{fa-css-prefix}-search:before { content: @fa-var-search; }
+.@{fa-css-prefix}-envelope-o:before { content: @fa-var-envelope-o; }
+.@{fa-css-prefix}-heart:before { content: @fa-var-heart; }
+.@{fa-css-prefix}-star:before { content: @fa-var-star; }
+.@{fa-css-prefix}-star-o:before { content: @fa-var-star-o; }
+.@{fa-css-prefix}-user:before { content: @fa-var-user; }
+.@{fa-css-prefix}-film:before { content: @fa-var-film; }
+.@{fa-css-prefix}-th-large:before { content: @fa-var-th-large; }
+.@{fa-css-prefix}-th:before { content: @fa-var-th; }
+.@{fa-css-prefix}-th-list:before { content: @fa-var-th-list; }
+.@{fa-css-prefix}-check:before { content: @fa-var-check; }
+.@{fa-css-prefix}-remove:before,
+.@{fa-css-prefix}-close:before,
+.@{fa-css-prefix}-times:before { content: @fa-var-times; }
+.@{fa-css-prefix}-search-plus:before { content: @fa-var-search-plus; }
+.@{fa-css-prefix}-search-minus:before { content: @fa-var-search-minus; }
+.@{fa-css-prefix}-power-off:before { content: @fa-var-power-off; }
+.@{fa-css-prefix}-signal:before { content: @fa-var-signal; }
+.@{fa-css-prefix}-gear:before,
+.@{fa-css-prefix}-cog:before { content: @fa-var-cog; }
+.@{fa-css-prefix}-trash-o:before { content: @fa-var-trash-o; }
+.@{fa-css-prefix}-home:before { content: @fa-var-home; }
+.@{fa-css-prefix}-file-o:before { content: @fa-var-file-o; }
+.@{fa-css-prefix}-clock-o:before { content: @fa-var-clock-o; }
+.@{fa-css-prefix}-road:before { content: @fa-var-road; }
+.@{fa-css-prefix}-download:before { content: @fa-var-download; }
+.@{fa-css-prefix}-arrow-circle-o-down:before { content: @fa-var-arrow-circle-o-down; }
+.@{fa-css-prefix}-arrow-circle-o-up:before { content: @fa-var-arrow-circle-o-up; }
+.@{fa-css-prefix}-inbox:before { content: @fa-var-inbox; }
+.@{fa-css-prefix}-play-circle-o:before { content: @fa-var-play-circle-o; }
+.@{fa-css-prefix}-rotate-right:before,
+.@{fa-css-prefix}-repeat:before { content: @fa-var-repeat; }
+.@{fa-css-prefix}-refresh:before { content: @fa-var-refresh; }
+.@{fa-css-prefix}-list-alt:before { content: @fa-var-list-alt; }
+.@{fa-css-prefix}-lock:before { content: @fa-var-lock; }
+.@{fa-css-prefix}-flag:before { content: @fa-var-flag; }
+.@{fa-css-prefix}-headphones:before { content: @fa-var-headphones; }
+.@{fa-css-prefix}-volume-off:before { content: @fa-var-volume-off; }
+.@{fa-css-prefix}-volume-down:before { content: @fa-var-volume-down; }
+.@{fa-css-prefix}-volume-up:before { content: @fa-var-volume-up; }
+.@{fa-css-prefix}-qrcode:before { content: @fa-var-qrcode; }
+.@{fa-css-prefix}-barcode:before { content: @fa-var-barcode; }
+.@{fa-css-prefix}-tag:before { content: @fa-var-tag; }
+.@{fa-css-prefix}-tags:before { content: @fa-var-tags; }
+.@{fa-css-prefix}-book:before { content: @fa-var-book; }
+.@{fa-css-prefix}-bookmark:before { content: @fa-var-bookmark; }
+.@{fa-css-prefix}-print:before { content: @fa-var-print; }
+.@{fa-css-prefix}-camera:before { content: @fa-var-camera; }
+.@{fa-css-prefix}-font:before { content: @fa-var-font; }
+.@{fa-css-prefix}-bold:before { content: @fa-var-bold; }
+.@{fa-css-prefix}-italic:before { content: @fa-var-italic; }
+.@{fa-css-prefix}-text-height:before { content: @fa-var-text-height; }
+.@{fa-css-prefix}-text-width:before { content: @fa-var-text-width; }
+.@{fa-css-prefix}-align-left:before { content: @fa-var-align-left; }
+.@{fa-css-prefix}-align-center:before { content: @fa-var-align-center; }
+.@{fa-css-prefix}-align-right:before { content: @fa-var-align-right; }
+.@{fa-css-prefix}-align-justify:before { content: @fa-var-align-justify; }
+.@{fa-css-prefix}-list:before { content: @fa-var-list; }
+.@{fa-css-prefix}-dedent:before,
+.@{fa-css-prefix}-outdent:before { content: @fa-var-outdent; }
+.@{fa-css-prefix}-indent:before { content: @fa-var-indent; }
+.@{fa-css-prefix}-video-camera:before { content: @fa-var-video-camera; }
+.@{fa-css-prefix}-photo:before,
+.@{fa-css-prefix}-image:before,
+.@{fa-css-prefix}-picture-o:before { content: @fa-var-picture-o; }
+.@{fa-css-prefix}-pencil:before { content: @fa-var-pencil; }
+.@{fa-css-prefix}-map-marker:before { content: @fa-var-map-marker; }
+.@{fa-css-prefix}-adjust:before { content: @fa-var-adjust; }
+.@{fa-css-prefix}-tint:before { content: @fa-var-tint; }
+.@{fa-css-prefix}-edit:before,
+.@{fa-css-prefix}-pencil-square-o:before { content: @fa-var-pencil-square-o; }
+.@{fa-css-prefix}-share-square-o:before { content: @fa-var-share-square-o; }
+.@{fa-css-prefix}-check-square-o:before { content: @fa-var-check-square-o; }
+.@{fa-css-prefix}-arrows:before { content: @fa-var-arrows; }
+.@{fa-css-prefix}-step-backward:before { content: @fa-var-step-backward; }
+.@{fa-css-prefix}-fast-backward:before { content: @fa-var-fast-backward; }
+.@{fa-css-prefix}-backward:before { content: @fa-var-backward; }
+.@{fa-css-prefix}-play:before { content: @fa-var-play; }
+.@{fa-css-prefix}-pause:before { content: @fa-var-pause; }
+.@{fa-css-prefix}-stop:before { content: @fa-var-stop; }
+.@{fa-css-prefix}-forward:before { content: @fa-var-forward; }
+.@{fa-css-prefix}-fast-forward:before { content: @fa-var-fast-forward; }
+.@{fa-css-prefix}-step-forward:before { content: @fa-var-step-forward; }
+.@{fa-css-prefix}-eject:before { content: @fa-var-eject; }
+.@{fa-css-prefix}-chevron-left:before { content: @fa-var-chevron-left; }
+.@{fa-css-prefix}-chevron-right:before { content: @fa-var-chevron-right; }
+.@{fa-css-prefix}-plus-circle:before { content: @fa-var-plus-circle; }
+.@{fa-css-prefix}-minus-circle:before { content: @fa-var-minus-circle; }
+.@{fa-css-prefix}-times-circle:before { content: @fa-var-times-circle; }
+.@{fa-css-prefix}-check-circle:before { content: @fa-var-check-circle; }
+.@{fa-css-prefix}-question-circle:before { content: @fa-var-question-circle; }
+.@{fa-css-prefix}-info-circle:before { content: @fa-var-info-circle; }
+.@{fa-css-prefix}-crosshairs:before { content: @fa-var-crosshairs; }
+.@{fa-css-prefix}-times-circle-o:before { content: @fa-var-times-circle-o; }
+.@{fa-css-prefix}-check-circle-o:before { content: @fa-var-check-circle-o; }
+.@{fa-css-prefix}-ban:before { content: @fa-var-ban; }
+.@{fa-css-prefix}-arrow-left:before { content: @fa-var-arrow-left; }
+.@{fa-css-prefix}-arrow-right:before { content: @fa-var-arrow-right; }
+.@{fa-css-prefix}-arrow-up:before { content: @fa-var-arrow-up; }
+.@{fa-css-prefix}-arrow-down:before { content: @fa-var-arrow-down; }
+.@{fa-css-prefix}-mail-forward:before,
+.@{fa-css-prefix}-share:before { content: @fa-var-share; }
+.@{fa-css-prefix}-expand:before { content: @fa-var-expand; }
+.@{fa-css-prefix}-compress:before { content: @fa-var-compress; }
+.@{fa-css-prefix}-plus:before { content: @fa-var-plus; }
+.@{fa-css-prefix}-minus:before { content: @fa-var-minus; }
+.@{fa-css-prefix}-asterisk:before { content: @fa-var-asterisk; }
+.@{fa-css-prefix}-exclamation-circle:before { content: @fa-var-exclamation-circle; }
+.@{fa-css-prefix}-gift:before { content: @fa-var-gift; }
+.@{fa-css-prefix}-leaf:before { content: @fa-var-leaf; }
+.@{fa-css-prefix}-fire:before { content: @fa-var-fire; }
+.@{fa-css-prefix}-eye:before { content: @fa-var-eye; }
+.@{fa-css-prefix}-eye-slash:before { content: @fa-var-eye-slash; }
+.@{fa-css-prefix}-warning:before,
+.@{fa-css-prefix}-exclamation-triangle:before { content: @fa-var-exclamation-triangle; }
+.@{fa-css-prefix}-plane:before { content: @fa-var-plane; }
+.@{fa-css-prefix}-calendar:before { content: @fa-var-calendar; }
+.@{fa-css-prefix}-random:before { content: @fa-var-random; }
+.@{fa-css-prefix}-comment:before { content: @fa-var-comment; }
+.@{fa-css-prefix}-magnet:before { content: @fa-var-magnet; }
+.@{fa-css-prefix}-chevron-up:before { content: @fa-var-chevron-up; }
+.@{fa-css-prefix}-chevron-down:before { content: @fa-var-chevron-down; }
+.@{fa-css-prefix}-retweet:before { content: @fa-var-retweet; }
+.@{fa-css-prefix}-shopping-cart:before { content: @fa-var-shopping-cart; }
+.@{fa-css-prefix}-folder:before { content: @fa-var-folder; }
+.@{fa-css-prefix}-folder-open:before { content: @fa-var-folder-open; }
+.@{fa-css-prefix}-arrows-v:before { content: @fa-var-arrows-v; }
+.@{fa-css-prefix}-arrows-h:before { content: @fa-var-arrows-h; }
+.@{fa-css-prefix}-bar-chart-o:before,
+.@{fa-css-prefix}-bar-chart:before { content: @fa-var-bar-chart; }
+.@{fa-css-prefix}-twitter-square:before { content: @fa-var-twitter-square; }
+.@{fa-css-prefix}-facebook-square:before { content: @fa-var-facebook-square; }
+.@{fa-css-prefix}-camera-retro:before { content: @fa-var-camera-retro; }
+.@{fa-css-prefix}-key:before { content: @fa-var-key; }
+.@{fa-css-prefix}-gears:before,
+.@{fa-css-prefix}-cogs:before { content: @fa-var-cogs; }
+.@{fa-css-prefix}-comments:before { content: @fa-var-comments; }
+.@{fa-css-prefix}-thumbs-o-up:before { content: @fa-var-thumbs-o-up; }
+.@{fa-css-prefix}-thumbs-o-down:before { content: @fa-var-thumbs-o-down; }
+.@{fa-css-prefix}-star-half:before { content: @fa-var-star-half; }
+.@{fa-css-prefix}-heart-o:before { content: @fa-var-heart-o; }
+.@{fa-css-prefix}-sign-out:before { content: @fa-var-sign-out; }
+.@{fa-css-prefix}-linkedin-square:before { content: @fa-var-linkedin-square; }
+.@{fa-css-prefix}-thumb-tack:before { content: @fa-var-thumb-tack; }
+.@{fa-css-prefix}-external-link:before { content: @fa-var-external-link; }
+.@{fa-css-prefix}-sign-in:before { content: @fa-var-sign-in; }
+.@{fa-css-prefix}-trophy:before { content: @fa-var-trophy; }
+.@{fa-css-prefix}-github-square:before { content: @fa-var-github-square; }
+.@{fa-css-prefix}-upload:before { content: @fa-var-upload; }
+.@{fa-css-prefix}-lemon-o:before { content: @fa-var-lemon-o; }
+.@{fa-css-prefix}-phone:before { content: @fa-var-phone; }
+.@{fa-css-prefix}-square-o:before { content: @fa-var-square-o; }
+.@{fa-css-prefix}-bookmark-o:before { content: @fa-var-bookmark-o; }
+.@{fa-css-prefix}-phone-square:before { content: @fa-var-phone-square; }
+.@{fa-css-prefix}-twitter:before { content: @fa-var-twitter; }
+.@{fa-css-prefix}-facebook-f:before,
+.@{fa-css-prefix}-facebook:before { content: @fa-var-facebook; }
+.@{fa-css-prefix}-github:before { content: @fa-var-github; }
+.@{fa-css-prefix}-unlock:before { content: @fa-var-unlock; }
+.@{fa-css-prefix}-credit-card:before { content: @fa-var-credit-card; }
+.@{fa-css-prefix}-feed:before,
+.@{fa-css-prefix}-rss:before { content: @fa-var-rss; }
+.@{fa-css-prefix}-hdd-o:before { content: @fa-var-hdd-o; }
+.@{fa-css-prefix}-bullhorn:before { content: @fa-var-bullhorn; }
+.@{fa-css-prefix}-bell:before { content: @fa-var-bell; }
+.@{fa-css-prefix}-certificate:before { content: @fa-var-certificate; }
+.@{fa-css-prefix}-hand-o-right:before { content: @fa-var-hand-o-right; }
+.@{fa-css-prefix}-hand-o-left:before { content: @fa-var-hand-o-left; }
+.@{fa-css-prefix}-hand-o-up:before { content: @fa-var-hand-o-up; }
+.@{fa-css-prefix}-hand-o-down:before { content: @fa-var-hand-o-down; }
+.@{fa-css-prefix}-arrow-circle-left:before { content: @fa-var-arrow-circle-left; }
+.@{fa-css-prefix}-arrow-circle-right:before { content: @fa-var-arrow-circle-right; }
+.@{fa-css-prefix}-arrow-circle-up:before { content: @fa-var-arrow-circle-up; }
+.@{fa-css-prefix}-arrow-circle-down:before { content: @fa-var-arrow-circle-down; }
+.@{fa-css-prefix}-globe:before { content: @fa-var-globe; }
+.@{fa-css-prefix}-wrench:before { content: @fa-var-wrench; }
+.@{fa-css-prefix}-tasks:before { content: @fa-var-tasks; }
+.@{fa-css-prefix}-filter:before { content: @fa-var-filter; }
+.@{fa-css-prefix}-briefcase:before { content: @fa-var-briefcase; }
+.@{fa-css-prefix}-arrows-alt:before { content: @fa-var-arrows-alt; }
+.@{fa-css-prefix}-group:before,
+.@{fa-css-prefix}-users:before { content: @fa-var-users; }
+.@{fa-css-prefix}-chain:before,
+.@{fa-css-prefix}-link:before { content: @fa-var-link; }
+.@{fa-css-prefix}-cloud:before { content: @fa-var-cloud; }
+.@{fa-css-prefix}-flask:before { content: @fa-var-flask; }
+.@{fa-css-prefix}-cut:before,
+.@{fa-css-prefix}-scissors:before { content: @fa-var-scissors; }
+.@{fa-css-prefix}-copy:before,
+.@{fa-css-prefix}-files-o:before { content: @fa-var-files-o; }
+.@{fa-css-prefix}-paperclip:before { content: @fa-var-paperclip; }
+.@{fa-css-prefix}-save:before,
+.@{fa-css-prefix}-floppy-o:before { content: @fa-var-floppy-o; }
+.@{fa-css-prefix}-square:before { content: @fa-var-square; }
+.@{fa-css-prefix}-navicon:before,
+.@{fa-css-prefix}-reorder:before,
+.@{fa-css-prefix}-bars:before { content: @fa-var-bars; }
+.@{fa-css-prefix}-list-ul:before { content: @fa-var-list-ul; }
+.@{fa-css-prefix}-list-ol:before { content: @fa-var-list-ol; }
+.@{fa-css-prefix}-strikethrough:before { content: @fa-var-strikethrough; }
+.@{fa-css-prefix}-underline:before { content: @fa-var-underline; }
+.@{fa-css-prefix}-table:before { content: @fa-var-table; }
+.@{fa-css-prefix}-magic:before { content: @fa-var-magic; }
+.@{fa-css-prefix}-truck:before { content: @fa-var-truck; }
+.@{fa-css-prefix}-pinterest:before { content: @fa-var-pinterest; }
+.@{fa-css-prefix}-pinterest-square:before { content: @fa-var-pinterest-square; }
+.@{fa-css-prefix}-google-plus-square:before { content: @fa-var-google-plus-square; }
+.@{fa-css-prefix}-google-plus:before { content: @fa-var-google-plus; }
+.@{fa-css-prefix}-money:before { content: @fa-var-money; }
+.@{fa-css-prefix}-caret-down:before { content: @fa-var-caret-down; }
+.@{fa-css-prefix}-caret-up:before { content: @fa-var-caret-up; }
+.@{fa-css-prefix}-caret-left:before { content: @fa-var-caret-left; }
+.@{fa-css-prefix}-caret-right:before { content: @fa-var-caret-right; }
+.@{fa-css-prefix}-columns:before { content: @fa-var-columns; }
+.@{fa-css-prefix}-unsorted:before,
+.@{fa-css-prefix}-sort:before { content: @fa-var-sort; }
+.@{fa-css-prefix}-sort-down:before,
+.@{fa-css-prefix}-sort-desc:before { content: @fa-var-sort-desc; }
+.@{fa-css-prefix}-sort-up:before,
+.@{fa-css-prefix}-sort-asc:before { content: @fa-var-sort-asc; }
+.@{fa-css-prefix}-envelope:before { content: @fa-var-envelope; }
+.@{fa-css-prefix}-linkedin:before { content: @fa-var-linkedin; }
+.@{fa-css-prefix}-rotate-left:before,
+.@{fa-css-prefix}-undo:before { content: @fa-var-undo; }
+.@{fa-css-prefix}-legal:before,
+.@{fa-css-prefix}-gavel:before { content: @fa-var-gavel; }
+.@{fa-css-prefix}-dashboard:before,
+.@{fa-css-prefix}-tachometer:before { content: @fa-var-tachometer; }
+.@{fa-css-prefix}-comment-o:before { content: @fa-var-comment-o; }
+.@{fa-css-prefix}-comments-o:before { content: @fa-var-comments-o; }
+.@{fa-css-prefix}-flash:before,
+.@{fa-css-prefix}-bolt:before { content: @fa-var-bolt; }
+.@{fa-css-prefix}-sitemap:before { content: @fa-var-sitemap; }
+.@{fa-css-prefix}-umbrella:before { content: @fa-var-umbrella; }
+.@{fa-css-prefix}-paste:before,
+.@{fa-css-prefix}-clipboard:before { content: @fa-var-clipboard; }
+.@{fa-css-prefix}-lightbulb-o:before { content: @fa-var-lightbulb-o; }
+.@{fa-css-prefix}-exchange:before { content: @fa-var-exchange; }
+.@{fa-css-prefix}-cloud-download:before { content: @fa-var-cloud-download; }
+.@{fa-css-prefix}-cloud-upload:before { content: @fa-var-cloud-upload; }
+.@{fa-css-prefix}-user-md:before { content: @fa-var-user-md; }
+.@{fa-css-prefix}-stethoscope:before { content: @fa-var-stethoscope; }
+.@{fa-css-prefix}-suitcase:before { content: @fa-var-suitcase; }
+.@{fa-css-prefix}-bell-o:before { content: @fa-var-bell-o; }
+.@{fa-css-prefix}-coffee:before { content: @fa-var-coffee; }
+.@{fa-css-prefix}-cutlery:before { content: @fa-var-cutlery; }
+.@{fa-css-prefix}-file-text-o:before { content: @fa-var-file-text-o; }
+.@{fa-css-prefix}-building-o:before { content: @fa-var-building-o; }
+.@{fa-css-prefix}-hospital-o:before { content: @fa-var-hospital-o; }
+.@{fa-css-prefix}-ambulance:before { content: @fa-var-ambulance; }
+.@{fa-css-prefix}-medkit:before { content: @fa-var-medkit; }
+.@{fa-css-prefix}-fighter-jet:before { content: @fa-var-fighter-jet; }
+.@{fa-css-prefix}-beer:before { content: @fa-var-beer; }
+.@{fa-css-prefix}-h-square:before { content: @fa-var-h-square; }
+.@{fa-css-prefix}-plus-square:before { content: @fa-var-plus-square; }
+.@{fa-css-prefix}-angle-double-left:before { content: @fa-var-angle-double-left; }
+.@{fa-css-prefix}-angle-double-right:before { content: @fa-var-angle-double-right; }
+.@{fa-css-prefix}-angle-double-up:before { content: @fa-var-angle-double-up; }
+.@{fa-css-prefix}-angle-double-down:before { content: @fa-var-angle-double-down; }
+.@{fa-css-prefix}-angle-left:before { content: @fa-var-angle-left; }
+.@{fa-css-prefix}-angle-right:before { content: @fa-var-angle-right; }
+.@{fa-css-prefix}-angle-up:before { content: @fa-var-angle-up; }
+.@{fa-css-prefix}-angle-down:before { content: @fa-var-angle-down; }
+.@{fa-css-prefix}-desktop:before { content: @fa-var-desktop; }
+.@{fa-css-prefix}-laptop:before { content: @fa-var-laptop; }
+.@{fa-css-prefix}-tablet:before { content: @fa-var-tablet; }
+.@{fa-css-prefix}-mobile-phone:before,
+.@{fa-css-prefix}-mobile:before { content: @fa-var-mobile; }
+.@{fa-css-prefix}-circle-o:before { content: @fa-var-circle-o; }
+.@{fa-css-prefix}-quote-left:before { content: @fa-var-quote-left; }
+.@{fa-css-prefix}-quote-right:before { content: @fa-var-quote-right; }
+.@{fa-css-prefix}-spinner:before { content: @fa-var-spinner; }
+.@{fa-css-prefix}-circle:before { content: @fa-var-circle; }
+.@{fa-css-prefix}-mail-reply:before,
+.@{fa-css-prefix}-reply:before { content: @fa-var-reply; }
+.@{fa-css-prefix}-github-alt:before { content: @fa-var-github-alt; }
+.@{fa-css-prefix}-folder-o:before { content: @fa-var-folder-o; }
+.@{fa-css-prefix}-folder-open-o:before { content: @fa-var-folder-open-o; }
+.@{fa-css-prefix}-smile-o:before { content: @fa-var-smile-o; }
+.@{fa-css-prefix}-frown-o:before { content: @fa-var-frown-o; }
+.@{fa-css-prefix}-meh-o:before { content: @fa-var-meh-o; }
+.@{fa-css-prefix}-gamepad:before { content: @fa-var-gamepad; }
+.@{fa-css-prefix}-keyboard-o:before { content: @fa-var-keyboard-o; }
+.@{fa-css-prefix}-flag-o:before { content: @fa-var-flag-o; }
+.@{fa-css-prefix}-flag-checkered:before { content: @fa-var-flag-checkered; }
+.@{fa-css-prefix}-terminal:before { content: @fa-var-terminal; }
+.@{fa-css-prefix}-code:before { content: @fa-var-code; }
+.@{fa-css-prefix}-mail-reply-all:before,
+.@{fa-css-prefix}-reply-all:before { content: @fa-var-reply-all; }
+.@{fa-css-prefix}-star-half-empty:before,
+.@{fa-css-prefix}-star-half-full:before,
+.@{fa-css-prefix}-star-half-o:before { content: @fa-var-star-half-o; }
+.@{fa-css-prefix}-location-arrow:before { content: @fa-var-location-arrow; }
+.@{fa-css-prefix}-crop:before { content: @fa-var-crop; }
+.@{fa-css-prefix}-code-fork:before { content: @fa-var-code-fork; }
+.@{fa-css-prefix}-unlink:before,
+.@{fa-css-prefix}-chain-broken:before { content: @fa-var-chain-broken; }
+.@{fa-css-prefix}-question:before { content: @fa-var-question; }
+.@{fa-css-prefix}-info:before { content: @fa-var-info; }
+.@{fa-css-prefix}-exclamation:before { content: @fa-var-exclamation; }
+.@{fa-css-prefix}-superscript:before { content: @fa-var-superscript; }
+.@{fa-css-prefix}-subscript:before { content: @fa-var-subscript; }
+.@{fa-css-prefix}-eraser:before { content: @fa-var-eraser; }
+.@{fa-css-prefix}-puzzle-piece:before { content: @fa-var-puzzle-piece; }
+.@{fa-css-prefix}-microphone:before { content: @fa-var-microphone; }
+.@{fa-css-prefix}-microphone-slash:before { content: @fa-var-microphone-slash; }
+.@{fa-css-prefix}-shield:before { content: @fa-var-shield; }
+.@{fa-css-prefix}-calendar-o:before { content: @fa-var-calendar-o; }
+.@{fa-css-prefix}-fire-extinguisher:before { content: @fa-var-fire-extinguisher; }
+.@{fa-css-prefix}-rocket:before { content: @fa-var-rocket; }
+.@{fa-css-prefix}-maxcdn:before { content: @fa-var-maxcdn; }
+.@{fa-css-prefix}-chevron-circle-left:before { content: @fa-var-chevron-circle-left; }
+.@{fa-css-prefix}-chevron-circle-right:before { content: @fa-var-chevron-circle-right; }
+.@{fa-css-prefix}-chevron-circle-up:before { content: @fa-var-chevron-circle-up; }
+.@{fa-css-prefix}-chevron-circle-down:before { content: @fa-var-chevron-circle-down; }
+.@{fa-css-prefix}-html5:before { content: @fa-var-html5; }
+.@{fa-css-prefix}-css3:before { content: @fa-var-css3; }
+.@{fa-css-prefix}-anchor:before { content: @fa-var-anchor; }
+.@{fa-css-prefix}-unlock-alt:before { content: @fa-var-unlock-alt; }
+.@{fa-css-prefix}-bullseye:before { content: @fa-var-bullseye; }
+.@{fa-css-prefix}-ellipsis-h:before { content: @fa-var-ellipsis-h; }
+.@{fa-css-prefix}-ellipsis-v:before { content: @fa-var-ellipsis-v; }
+.@{fa-css-prefix}-rss-square:before { content: @fa-var-rss-square; }
+.@{fa-css-prefix}-play-circle:before { content: @fa-var-play-circle; }
+.@{fa-css-prefix}-ticket:before { content: @fa-var-ticket; }
+.@{fa-css-prefix}-minus-square:before { content: @fa-var-minus-square; }
+.@{fa-css-prefix}-minus-square-o:before { content: @fa-var-minus-square-o; }
+.@{fa-css-prefix}-level-up:before { content: @fa-var-level-up; }
+.@{fa-css-prefix}-level-down:before { content: @fa-var-level-down; }
+.@{fa-css-prefix}-check-square:before { content: @fa-var-check-square; }
+.@{fa-css-prefix}-pencil-square:before { content: @fa-var-pencil-square; }
+.@{fa-css-prefix}-external-link-square:before { content: @fa-var-external-link-square; }
+.@{fa-css-prefix}-share-square:before { content: @fa-var-share-square; }
+.@{fa-css-prefix}-compass:before { content: @fa-var-compass; }
+.@{fa-css-prefix}-toggle-down:before,
+.@{fa-css-prefix}-caret-square-o-down:before { content: @fa-var-caret-square-o-down; }
+.@{fa-css-prefix}-toggle-up:before,
+.@{fa-css-prefix}-caret-square-o-up:before { content: @fa-var-caret-square-o-up; }
+.@{fa-css-prefix}-toggle-right:before,
+.@{fa-css-prefix}-caret-square-o-right:before { content: @fa-var-caret-square-o-right; }
+.@{fa-css-prefix}-euro:before,
+.@{fa-css-prefix}-eur:before { content: @fa-var-eur; }
+.@{fa-css-prefix}-gbp:before { content: @fa-var-gbp; }
+.@{fa-css-prefix}-dollar:before,
+.@{fa-css-prefix}-usd:before { content: @fa-var-usd; }
+.@{fa-css-prefix}-rupee:before,
+.@{fa-css-prefix}-inr:before { content: @fa-var-inr; }
+.@{fa-css-prefix}-cny:before,
+.@{fa-css-prefix}-rmb:before,
+.@{fa-css-prefix}-yen:before,
+.@{fa-css-prefix}-jpy:before { content: @fa-var-jpy; }
+.@{fa-css-prefix}-ruble:before,
+.@{fa-css-prefix}-rouble:before,
+.@{fa-css-prefix}-rub:before { content: @fa-var-rub; }
+.@{fa-css-prefix}-won:before,
+.@{fa-css-prefix}-krw:before { content: @fa-var-krw; }
+.@{fa-css-prefix}-bitcoin:before,
+.@{fa-css-prefix}-btc:before { content: @fa-var-btc; }
+.@{fa-css-prefix}-file:before { content: @fa-var-file; }
+.@{fa-css-prefix}-file-text:before { content: @fa-var-file-text; }
+.@{fa-css-prefix}-sort-alpha-asc:before { content: @fa-var-sort-alpha-asc; }
+.@{fa-css-prefix}-sort-alpha-desc:before { content: @fa-var-sort-alpha-desc; }
+.@{fa-css-prefix}-sort-amount-asc:before { content: @fa-var-sort-amount-asc; }
+.@{fa-css-prefix}-sort-amount-desc:before { content: @fa-var-sort-amount-desc; }
+.@{fa-css-prefix}-sort-numeric-asc:before { content: @fa-var-sort-numeric-asc; }
+.@{fa-css-prefix}-sort-numeric-desc:before { content: @fa-var-sort-numeric-desc; }
+.@{fa-css-prefix}-thumbs-up:before { content: @fa-var-thumbs-up; }
+.@{fa-css-prefix}-thumbs-down:before { content: @fa-var-thumbs-down; }
+.@{fa-css-prefix}-youtube-square:before { content: @fa-var-youtube-square; }
+.@{fa-css-prefix}-youtube:before { content: @fa-var-youtube; }
+.@{fa-css-prefix}-xing:before { content: @fa-var-xing; }
+.@{fa-css-prefix}-xing-square:before { content: @fa-var-xing-square; }
+.@{fa-css-prefix}-youtube-play:before { content: @fa-var-youtube-play; }
+.@{fa-css-prefix}-dropbox:before { content: @fa-var-dropbox; }
+.@{fa-css-prefix}-stack-overflow:before { content: @fa-var-stack-overflow; }
+.@{fa-css-prefix}-instagram:before { content: @fa-var-instagram; }
+.@{fa-css-prefix}-flickr:before { content: @fa-var-flickr; }
+.@{fa-css-prefix}-adn:before { content: @fa-var-adn; }
+.@{fa-css-prefix}-bitbucket:before { content: @fa-var-bitbucket; }
+.@{fa-css-prefix}-bitbucket-square:before { content: @fa-var-bitbucket-square; }
+.@{fa-css-prefix}-tumblr:before { content: @fa-var-tumblr; }
+.@{fa-css-prefix}-tumblr-square:before { content: @fa-var-tumblr-square; }
+.@{fa-css-prefix}-long-arrow-down:before { content: @fa-var-long-arrow-down; }
+.@{fa-css-prefix}-long-arrow-up:before { content: @fa-var-long-arrow-up; }
+.@{fa-css-prefix}-long-arrow-left:before { content: @fa-var-long-arrow-left; }
+.@{fa-css-prefix}-long-arrow-right:before { content: @fa-var-long-arrow-right; }
+.@{fa-css-prefix}-apple:before { content: @fa-var-apple; }
+.@{fa-css-prefix}-windows:before { content: @fa-var-windows; }
+.@{fa-css-prefix}-android:before { content: @fa-var-android; }
+.@{fa-css-prefix}-linux:before { content: @fa-var-linux; }
+.@{fa-css-prefix}-dribbble:before { content: @fa-var-dribbble; }
+.@{fa-css-prefix}-skype:before { content: @fa-var-skype; }
+.@{fa-css-prefix}-foursquare:before { content: @fa-var-foursquare; }
+.@{fa-css-prefix}-trello:before { content: @fa-var-trello; }
+.@{fa-css-prefix}-female:before { content: @fa-var-female; }
+.@{fa-css-prefix}-male:before { content: @fa-var-male; }
+.@{fa-css-prefix}-gittip:before,
+.@{fa-css-prefix}-gratipay:before { content: @fa-var-gratipay; }
+.@{fa-css-prefix}-sun-o:before { content: @fa-var-sun-o; }
+.@{fa-css-prefix}-moon-o:before { content: @fa-var-moon-o; }
+.@{fa-css-prefix}-archive:before { content: @fa-var-archive; }
+.@{fa-css-prefix}-bug:before { content: @fa-var-bug; }
+.@{fa-css-prefix}-vk:before { content: @fa-var-vk; }
+.@{fa-css-prefix}-weibo:before { content: @fa-var-weibo; }
+.@{fa-css-prefix}-renren:before { content: @fa-var-renren; }
+.@{fa-css-prefix}-pagelines:before { content: @fa-var-pagelines; }
+.@{fa-css-prefix}-stack-exchange:before { content: @fa-var-stack-exchange; }
+.@{fa-css-prefix}-arrow-circle-o-right:before { content: @fa-var-arrow-circle-o-right; }
+.@{fa-css-prefix}-arrow-circle-o-left:before { content: @fa-var-arrow-circle-o-left; }
+.@{fa-css-prefix}-toggle-left:before,
+.@{fa-css-prefix}-caret-square-o-left:before { content: @fa-var-caret-square-o-left; }
+.@{fa-css-prefix}-dot-circle-o:before { content: @fa-var-dot-circle-o; }
+.@{fa-css-prefix}-wheelchair:before { content: @fa-var-wheelchair; }
+.@{fa-css-prefix}-vimeo-square:before { content: @fa-var-vimeo-square; }
+.@{fa-css-prefix}-turkish-lira:before,
+.@{fa-css-prefix}-try:before { content: @fa-var-try; }
+.@{fa-css-prefix}-plus-square-o:before { content: @fa-var-plus-square-o; }
+.@{fa-css-prefix}-space-shuttle:before { content: @fa-var-space-shuttle; }
+.@{fa-css-prefix}-slack:before { content: @fa-var-slack; }
+.@{fa-css-prefix}-envelope-square:before { content: @fa-var-envelope-square; }
+.@{fa-css-prefix}-wordpress:before { content: @fa-var-wordpress; }
+.@{fa-css-prefix}-openid:before { content: @fa-var-openid; }
+.@{fa-css-prefix}-institution:before,
+.@{fa-css-prefix}-bank:before,
+.@{fa-css-prefix}-university:before { content: @fa-var-university; }
+.@{fa-css-prefix}-mortar-board:before,
+.@{fa-css-prefix}-graduation-cap:before { content: @fa-var-graduation-cap; }
+.@{fa-css-prefix}-yahoo:before { content: @fa-var-yahoo; }
+.@{fa-css-prefix}-google:before { content: @fa-var-google; }
+.@{fa-css-prefix}-reddit:before { content: @fa-var-reddit; }
+.@{fa-css-prefix}-reddit-square:before { content: @fa-var-reddit-square; }
+.@{fa-css-prefix}-stumbleupon-circle:before { content: @fa-var-stumbleupon-circle; }
+.@{fa-css-prefix}-stumbleupon:before { content: @fa-var-stumbleupon; }
+.@{fa-css-prefix}-delicious:before { content: @fa-var-delicious; }
+.@{fa-css-prefix}-digg:before { content: @fa-var-digg; }
+.@{fa-css-prefix}-pied-piper:before { content: @fa-var-pied-piper; }
+.@{fa-css-prefix}-pied-piper-alt:before { content: @fa-var-pied-piper-alt; }
+.@{fa-css-prefix}-drupal:before { content: @fa-var-drupal; }
+.@{fa-css-prefix}-joomla:before { content: @fa-var-joomla; }
+.@{fa-css-prefix}-language:before { content: @fa-var-language; }
+.@{fa-css-prefix}-fax:before { content: @fa-var-fax; }
+.@{fa-css-prefix}-building:before { content: @fa-var-building; }
+.@{fa-css-prefix}-child:before { content: @fa-var-child; }
+.@{fa-css-prefix}-paw:before { content: @fa-var-paw; }
+.@{fa-css-prefix}-spoon:before { content: @fa-var-spoon; }
+.@{fa-css-prefix}-cube:before { content: @fa-var-cube; }
+.@{fa-css-prefix}-cubes:before { content: @fa-var-cubes; }
+.@{fa-css-prefix}-behance:before { content: @fa-var-behance; }
+.@{fa-css-prefix}-behance-square:before { content: @fa-var-behance-square; }
+.@{fa-css-prefix}-steam:before { content: @fa-var-steam; }
+.@{fa-css-prefix}-steam-square:before { content: @fa-var-steam-square; }
+.@{fa-css-prefix}-recycle:before { content: @fa-var-recycle; }
+.@{fa-css-prefix}-automobile:before,
+.@{fa-css-prefix}-car:before { content: @fa-var-car; }
+.@{fa-css-prefix}-cab:before,
+.@{fa-css-prefix}-taxi:before { content: @fa-var-taxi; }
+.@{fa-css-prefix}-tree:before { content: @fa-var-tree; }
+.@{fa-css-prefix}-spotify:before { content: @fa-var-spotify; }
+.@{fa-css-prefix}-deviantart:before { content: @fa-var-deviantart; }
+.@{fa-css-prefix}-soundcloud:before { content: @fa-var-soundcloud; }
+.@{fa-css-prefix}-database:before { content: @fa-var-database; }
+.@{fa-css-prefix}-file-pdf-o:before { content: @fa-var-file-pdf-o; }
+.@{fa-css-prefix}-file-word-o:before { content: @fa-var-file-word-o; }
+.@{fa-css-prefix}-file-excel-o:before { content: @fa-var-file-excel-o; }
+.@{fa-css-prefix}-file-powerpoint-o:before { content: @fa-var-file-powerpoint-o; }
+.@{fa-css-prefix}-file-photo-o:before,
+.@{fa-css-prefix}-file-picture-o:before,
+.@{fa-css-prefix}-file-image-o:before { content: @fa-var-file-image-o; }
+.@{fa-css-prefix}-file-zip-o:before,
+.@{fa-css-prefix}-file-archive-o:before { content: @fa-var-file-archive-o; }
+.@{fa-css-prefix}-file-sound-o:before,
+.@{fa-css-prefix}-file-audio-o:before { content: @fa-var-file-audio-o; }
+.@{fa-css-prefix}-file-movie-o:before,
+.@{fa-css-prefix}-file-video-o:before { content: @fa-var-file-video-o; }
+.@{fa-css-prefix}-file-code-o:before { content: @fa-var-file-code-o; }
+.@{fa-css-prefix}-vine:before { content: @fa-var-vine; }
+.@{fa-css-prefix}-codepen:before { content: @fa-var-codepen; }
+.@{fa-css-prefix}-jsfiddle:before { content: @fa-var-jsfiddle; }
+.@{fa-css-prefix}-life-bouy:before,
+.@{fa-css-prefix}-life-buoy:before,
+.@{fa-css-prefix}-life-saver:before,
+.@{fa-css-prefix}-support:before,
+.@{fa-css-prefix}-life-ring:before { content: @fa-var-life-ring; }
+.@{fa-css-prefix}-circle-o-notch:before { content: @fa-var-circle-o-notch; }
+.@{fa-css-prefix}-ra:before,
+.@{fa-css-prefix}-rebel:before { content: @fa-var-rebel; }
+.@{fa-css-prefix}-ge:before,
+.@{fa-css-prefix}-empire:before { content: @fa-var-empire; }
+.@{fa-css-prefix}-git-square:before { content: @fa-var-git-square; }
+.@{fa-css-prefix}-git:before { content: @fa-var-git; }
+.@{fa-css-prefix}-y-combinator-square:before,
+.@{fa-css-prefix}-yc-square:before,
+.@{fa-css-prefix}-hacker-news:before { content: @fa-var-hacker-news; }
+.@{fa-css-prefix}-tencent-weibo:before { content: @fa-var-tencent-weibo; }
+.@{fa-css-prefix}-qq:before { content: @fa-var-qq; }
+.@{fa-css-prefix}-wechat:before,
+.@{fa-css-prefix}-weixin:before { content: @fa-var-weixin; }
+.@{fa-css-prefix}-send:before,
+.@{fa-css-prefix}-paper-plane:before { content: @fa-var-paper-plane; }
+.@{fa-css-prefix}-send-o:before,
+.@{fa-css-prefix}-paper-plane-o:before { content: @fa-var-paper-plane-o; }
+.@{fa-css-prefix}-history:before { content: @fa-var-history; }
+.@{fa-css-prefix}-circle-thin:before { content: @fa-var-circle-thin; }
+.@{fa-css-prefix}-header:before { content: @fa-var-header; }
+.@{fa-css-prefix}-paragraph:before { content: @fa-var-paragraph; }
+.@{fa-css-prefix}-sliders:before { content: @fa-var-sliders; }
+.@{fa-css-prefix}-share-alt:before { content: @fa-var-share-alt; }
+.@{fa-css-prefix}-share-alt-square:before { content: @fa-var-share-alt-square; }
+.@{fa-css-prefix}-bomb:before { content: @fa-var-bomb; }
+.@{fa-css-prefix}-soccer-ball-o:before,
+.@{fa-css-prefix}-futbol-o:before { content: @fa-var-futbol-o; }
+.@{fa-css-prefix}-tty:before { content: @fa-var-tty; }
+.@{fa-css-prefix}-binoculars:before { content: @fa-var-binoculars; }
+.@{fa-css-prefix}-plug:before { content: @fa-var-plug; }
+.@{fa-css-prefix}-slideshare:before { content: @fa-var-slideshare; }
+.@{fa-css-prefix}-twitch:before { content: @fa-var-twitch; }
+.@{fa-css-prefix}-yelp:before { content: @fa-var-yelp; }
+.@{fa-css-prefix}-newspaper-o:before { content: @fa-var-newspaper-o; }
+.@{fa-css-prefix}-wifi:before { content: @fa-var-wifi; }
+.@{fa-css-prefix}-calculator:before { content: @fa-var-calculator; }
+.@{fa-css-prefix}-paypal:before { content: @fa-var-paypal; }
+.@{fa-css-prefix}-google-wallet:before { content: @fa-var-google-wallet; }
+.@{fa-css-prefix}-cc-visa:before { content: @fa-var-cc-visa; }
+.@{fa-css-prefix}-cc-mastercard:before { content: @fa-var-cc-mastercard; }
+.@{fa-css-prefix}-cc-discover:before { content: @fa-var-cc-discover; }
+.@{fa-css-prefix}-cc-amex:before { content: @fa-var-cc-amex; }
+.@{fa-css-prefix}-cc-paypal:before { content: @fa-var-cc-paypal; }
+.@{fa-css-prefix}-cc-stripe:before { content: @fa-var-cc-stripe; }
+.@{fa-css-prefix}-bell-slash:before { content: @fa-var-bell-slash; }
+.@{fa-css-prefix}-bell-slash-o:before { content: @fa-var-bell-slash-o; }
+.@{fa-css-prefix}-trash:before { content: @fa-var-trash; }
+.@{fa-css-prefix}-copyright:before { content: @fa-var-copyright; }
+.@{fa-css-prefix}-at:before { content: @fa-var-at; }
+.@{fa-css-prefix}-eyedropper:before { content: @fa-var-eyedropper; }
+.@{fa-css-prefix}-paint-brush:before { content: @fa-var-paint-brush; }
+.@{fa-css-prefix}-birthday-cake:before { content: @fa-var-birthday-cake; }
+.@{fa-css-prefix}-area-chart:before { content: @fa-var-area-chart; }
+.@{fa-css-prefix}-pie-chart:before { content: @fa-var-pie-chart; }
+.@{fa-css-prefix}-line-chart:before { content: @fa-var-line-chart; }
+.@{fa-css-prefix}-lastfm:before { content: @fa-var-lastfm; }
+.@{fa-css-prefix}-lastfm-square:before { content: @fa-var-lastfm-square; }
+.@{fa-css-prefix}-toggle-off:before { content: @fa-var-toggle-off; }
+.@{fa-css-prefix}-toggle-on:before { content: @fa-var-toggle-on; }
+.@{fa-css-prefix}-bicycle:before { content: @fa-var-bicycle; }
+.@{fa-css-prefix}-bus:before { content: @fa-var-bus; }
+.@{fa-css-prefix}-ioxhost:before { content: @fa-var-ioxhost; }
+.@{fa-css-prefix}-angellist:before { content: @fa-var-angellist; }
+.@{fa-css-prefix}-cc:before { content: @fa-var-cc; }
+.@{fa-css-prefix}-shekel:before,
+.@{fa-css-prefix}-sheqel:before,
+.@{fa-css-prefix}-ils:before { content: @fa-var-ils; }
+.@{fa-css-prefix}-meanpath:before { content: @fa-var-meanpath; }
+.@{fa-css-prefix}-buysellads:before { content: @fa-var-buysellads; }
+.@{fa-css-prefix}-connectdevelop:before { content: @fa-var-connectdevelop; }
+.@{fa-css-prefix}-dashcube:before { content: @fa-var-dashcube; }
+.@{fa-css-prefix}-forumbee:before { content: @fa-var-forumbee; }
+.@{fa-css-prefix}-leanpub:before { content: @fa-var-leanpub; }
+.@{fa-css-prefix}-sellsy:before { content: @fa-var-sellsy; }
+.@{fa-css-prefix}-shirtsinbulk:before { content: @fa-var-shirtsinbulk; }
+.@{fa-css-prefix}-simplybuilt:before { content: @fa-var-simplybuilt; }
+.@{fa-css-prefix}-skyatlas:before { content: @fa-var-skyatlas; }
+.@{fa-css-prefix}-cart-plus:before { content: @fa-var-cart-plus; }
+.@{fa-css-prefix}-cart-arrow-down:before { content: @fa-var-cart-arrow-down; }
+.@{fa-css-prefix}-diamond:before { content: @fa-var-diamond; }
+.@{fa-css-prefix}-ship:before { content: @fa-var-ship; }
+.@{fa-css-prefix}-user-secret:before { content: @fa-var-user-secret; }
+.@{fa-css-prefix}-motorcycle:before { content: @fa-var-motorcycle; }
+.@{fa-css-prefix}-street-view:before { content: @fa-var-street-view; }
+.@{fa-css-prefix}-heartbeat:before { content: @fa-var-heartbeat; }
+.@{fa-css-prefix}-venus:before { content: @fa-var-venus; }
+.@{fa-css-prefix}-mars:before { content: @fa-var-mars; }
+.@{fa-css-prefix}-mercury:before { content: @fa-var-mercury; }
+.@{fa-css-prefix}-intersex:before,
+.@{fa-css-prefix}-transgender:before { content: @fa-var-transgender; }
+.@{fa-css-prefix}-transgender-alt:before { content: @fa-var-transgender-alt; }
+.@{fa-css-prefix}-venus-double:before { content: @fa-var-venus-double; }
+.@{fa-css-prefix}-mars-double:before { content: @fa-var-mars-double; }
+.@{fa-css-prefix}-venus-mars:before { content: @fa-var-venus-mars; }
+.@{fa-css-prefix}-mars-stroke:before { content: @fa-var-mars-stroke; }
+.@{fa-css-prefix}-mars-stroke-v:before { content: @fa-var-mars-stroke-v; }
+.@{fa-css-prefix}-mars-stroke-h:before { content: @fa-var-mars-stroke-h; }
+.@{fa-css-prefix}-neuter:before { content: @fa-var-neuter; }
+.@{fa-css-prefix}-genderless:before { content: @fa-var-genderless; }
+.@{fa-css-prefix}-facebook-official:before { content: @fa-var-facebook-official; }
+.@{fa-css-prefix}-pinterest-p:before { content: @fa-var-pinterest-p; }
+.@{fa-css-prefix}-whatsapp:before { content: @fa-var-whatsapp; }
+.@{fa-css-prefix}-server:before { content: @fa-var-server; }
+.@{fa-css-prefix}-user-plus:before { content: @fa-var-user-plus; }
+.@{fa-css-prefix}-user-times:before { content: @fa-var-user-times; }
+.@{fa-css-prefix}-hotel:before,
+.@{fa-css-prefix}-bed:before { content: @fa-var-bed; }
+.@{fa-css-prefix}-viacoin:before { content: @fa-var-viacoin; }
+.@{fa-css-prefix}-train:before { content: @fa-var-train; }
+.@{fa-css-prefix}-subway:before { content: @fa-var-subway; }
+.@{fa-css-prefix}-medium:before { content: @fa-var-medium; }
+.@{fa-css-prefix}-yc:before,
+.@{fa-css-prefix}-y-combinator:before { content: @fa-var-y-combinator; }
+.@{fa-css-prefix}-optin-monster:before { content: @fa-var-optin-monster; }
+.@{fa-css-prefix}-opencart:before { content: @fa-var-opencart; }
+.@{fa-css-prefix}-expeditedssl:before { content: @fa-var-expeditedssl; }
+.@{fa-css-prefix}-battery-4:before,
+.@{fa-css-prefix}-battery-full:before { content: @fa-var-battery-full; }
+.@{fa-css-prefix}-battery-3:before,
+.@{fa-css-prefix}-battery-three-quarters:before { content: @fa-var-battery-three-quarters; }
+.@{fa-css-prefix}-battery-2:before,
+.@{fa-css-prefix}-battery-half:before { content: @fa-var-battery-half; }
+.@{fa-css-prefix}-battery-1:before,
+.@{fa-css-prefix}-battery-quarter:before { content: @fa-var-battery-quarter; }
+.@{fa-css-prefix}-battery-0:before,
+.@{fa-css-prefix}-battery-empty:before { content: @fa-var-battery-empty; }
+.@{fa-css-prefix}-mouse-pointer:before { content: @fa-var-mouse-pointer; }
+.@{fa-css-prefix}-i-cursor:before { content: @fa-var-i-cursor; }
+.@{fa-css-prefix}-object-group:before { content: @fa-var-object-group; }
+.@{fa-css-prefix}-object-ungroup:before { content: @fa-var-object-ungroup; }
+.@{fa-css-prefix}-sticky-note:before { content: @fa-var-sticky-note; }
+.@{fa-css-prefix}-sticky-note-o:before { content: @fa-var-sticky-note-o; }
+.@{fa-css-prefix}-cc-jcb:before { content: @fa-var-cc-jcb; }
+.@{fa-css-prefix}-cc-diners-club:before { content: @fa-var-cc-diners-club; }
+.@{fa-css-prefix}-clone:before { content: @fa-var-clone; }
+.@{fa-css-prefix}-balance-scale:before { content: @fa-var-balance-scale; }
+.@{fa-css-prefix}-hourglass-o:before { content: @fa-var-hourglass-o; }
+.@{fa-css-prefix}-hourglass-1:before,
+.@{fa-css-prefix}-hourglass-start:before { content: @fa-var-hourglass-start; }
+.@{fa-css-prefix}-hourglass-2:before,
+.@{fa-css-prefix}-hourglass-half:before { content: @fa-var-hourglass-half; }
+.@{fa-css-prefix}-hourglass-3:before,
+.@{fa-css-prefix}-hourglass-end:before { content: @fa-var-hourglass-end; }
+.@{fa-css-prefix}-hourglass:before { content: @fa-var-hourglass; }
+.@{fa-css-prefix}-hand-grab-o:before,
+.@{fa-css-prefix}-hand-rock-o:before { content: @fa-var-hand-rock-o; }
+.@{fa-css-prefix}-hand-stop-o:before,
+.@{fa-css-prefix}-hand-paper-o:before { content: @fa-var-hand-paper-o; }
+.@{fa-css-prefix}-hand-scissors-o:before { content: @fa-var-hand-scissors-o; }
+.@{fa-css-prefix}-hand-lizard-o:before { content: @fa-var-hand-lizard-o; }
+.@{fa-css-prefix}-hand-spock-o:before { content: @fa-var-hand-spock-o; }
+.@{fa-css-prefix}-hand-pointer-o:before { content: @fa-var-hand-pointer-o; }
+.@{fa-css-prefix}-hand-peace-o:before { content: @fa-var-hand-peace-o; }
+.@{fa-css-prefix}-trademark:before { content: @fa-var-trademark; }
+.@{fa-css-prefix}-registered:before { content: @fa-var-registered; }
+.@{fa-css-prefix}-creative-commons:before { content: @fa-var-creative-commons; }
+.@{fa-css-prefix}-gg:before { content: @fa-var-gg; }
+.@{fa-css-prefix}-gg-circle:before { content: @fa-var-gg-circle; }
+.@{fa-css-prefix}-tripadvisor:before { content: @fa-var-tripadvisor; }
+.@{fa-css-prefix}-odnoklassniki:before { content: @fa-var-odnoklassniki; }
+.@{fa-css-prefix}-odnoklassniki-square:before { content: @fa-var-odnoklassniki-square; }
+.@{fa-css-prefix}-get-pocket:before { content: @fa-var-get-pocket; }
+.@{fa-css-prefix}-wikipedia-w:before { content: @fa-var-wikipedia-w; }
+.@{fa-css-prefix}-safari:before { content: @fa-var-safari; }
+.@{fa-css-prefix}-chrome:before { content: @fa-var-chrome; }
+.@{fa-css-prefix}-firefox:before { content: @fa-var-firefox; }
+.@{fa-css-prefix}-opera:before { content: @fa-var-opera; }
+.@{fa-css-prefix}-internet-explorer:before { content: @fa-var-internet-explorer; }
+.@{fa-css-prefix}-tv:before,
+.@{fa-css-prefix}-television:before { content: @fa-var-television; }
+.@{fa-css-prefix}-contao:before { content: @fa-var-contao; }
+.@{fa-css-prefix}-500px:before { content: @fa-var-500px; }
+.@{fa-css-prefix}-amazon:before { content: @fa-var-amazon; }
+.@{fa-css-prefix}-calendar-plus-o:before { content: @fa-var-calendar-plus-o; }
+.@{fa-css-prefix}-calendar-minus-o:before { content: @fa-var-calendar-minus-o; }
+.@{fa-css-prefix}-calendar-times-o:before { content: @fa-var-calendar-times-o; }
+.@{fa-css-prefix}-calendar-check-o:before { content: @fa-var-calendar-check-o; }
+.@{fa-css-prefix}-industry:before { content: @fa-var-industry; }
+.@{fa-css-prefix}-map-pin:before { content: @fa-var-map-pin; }
+.@{fa-css-prefix}-map-signs:before { content: @fa-var-map-signs; }
+.@{fa-css-prefix}-map-o:before { content: @fa-var-map-o; }
+.@{fa-css-prefix}-map:before { content: @fa-var-map; }
+.@{fa-css-prefix}-commenting:before { content: @fa-var-commenting; }
+.@{fa-css-prefix}-commenting-o:before { content: @fa-var-commenting-o; }
+.@{fa-css-prefix}-houzz:before { content: @fa-var-houzz; }
+.@{fa-css-prefix}-vimeo:before { content: @fa-var-vimeo; }
+.@{fa-css-prefix}-black-tie:before { content: @fa-var-black-tie; }
+.@{fa-css-prefix}-fonticons:before { content: @fa-var-fonticons; }
+.@{fa-css-prefix}-reddit-alien:before { content: @fa-var-reddit-alien; }
+.@{fa-css-prefix}-edge:before { content: @fa-var-edge; }
+.@{fa-css-prefix}-credit-card-alt:before { content: @fa-var-credit-card-alt; }
+.@{fa-css-prefix}-codiepie:before { content: @fa-var-codiepie; }
+.@{fa-css-prefix}-modx:before { content: @fa-var-modx; }
+.@{fa-css-prefix}-fort-awesome:before { content: @fa-var-fort-awesome; }
+.@{fa-css-prefix}-usb:before { content: @fa-var-usb; }
+.@{fa-css-prefix}-product-hunt:before { content: @fa-var-product-hunt; }
+.@{fa-css-prefix}-mixcloud:before { content: @fa-var-mixcloud; }
+.@{fa-css-prefix}-scribd:before { content: @fa-var-scribd; }
+.@{fa-css-prefix}-pause-circle:before { content: @fa-var-pause-circle; }
+.@{fa-css-prefix}-pause-circle-o:before { content: @fa-var-pause-circle-o; }
+.@{fa-css-prefix}-stop-circle:before { content: @fa-var-stop-circle; }
+.@{fa-css-prefix}-stop-circle-o:before { content: @fa-var-stop-circle-o; }
+.@{fa-css-prefix}-shopping-bag:before { content: @fa-var-shopping-bag; }
+.@{fa-css-prefix}-shopping-basket:before { content: @fa-var-shopping-basket; }
+.@{fa-css-prefix}-hashtag:before { content: @fa-var-hashtag; }
+.@{fa-css-prefix}-bluetooth:before { content: @fa-var-bluetooth; }
+.@{fa-css-prefix}-bluetooth-b:before { content: @fa-var-bluetooth-b; }
+.@{fa-css-prefix}-percent:before { content: @fa-var-percent; }
diff --git a/asset/static/fonts/less/larger.less b/asset/static/fonts/less/larger.less
new file mode 100755
index 0000000..c9d6467
--- /dev/null
+++ b/asset/static/fonts/less/larger.less
@@ -0,0 +1,13 @@
+// Icon Sizes
+// -------------------------
+
+/* makes the font 33% larger relative to the icon container */
+.@{fa-css-prefix}-lg {
+ font-size: (4em / 3);
+ line-height: (3em / 4);
+ vertical-align: -15%;
+}
+.@{fa-css-prefix}-2x { font-size: 2em; }
+.@{fa-css-prefix}-3x { font-size: 3em; }
+.@{fa-css-prefix}-4x { font-size: 4em; }
+.@{fa-css-prefix}-5x { font-size: 5em; }
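(Reference note, not part of the patch: LESS evaluates the parenthesized divisions at compile time, so with the default @fa-css-prefix of fa — defined in variables.less below — this file compiles to roughly:

    .fa-lg {
      font-size: 1.33333333em;   /* 4em / 3 */
      line-height: 0.75em;       /* 3em / 4 */
      vertical-align: -15%;
    }
    .fa-2x { font-size: 2em; }
    /* ...and so on through .fa-5x */

The em units keep every size relative to the surrounding text, which is why .fa-lg is "33% larger relative to the icon container" rather than a fixed pixel bump.)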
diff --git a/asset/static/fonts/less/list.less b/asset/static/fonts/less/list.less
new file mode 100755
index 0000000..0b44038
--- /dev/null
+++ b/asset/static/fonts/less/list.less
@@ -0,0 +1,19 @@
+// List Icons
+// -------------------------
+
+.@{fa-css-prefix}-ul {
+ padding-left: 0;
+ margin-left: @fa-li-width;
+ list-style-type: none;
+ > li { position: relative; }
+}
+.@{fa-css-prefix}-li {
+ position: absolute;
+ left: -@fa-li-width;
+ width: @fa-li-width;
+ top: (2em / 14);
+ text-align: center;
+ &.@{fa-css-prefix}-lg {
+ left: (-@fa-li-width + (4em / 14));
+ }
+}
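(Reference note: @fa-li-width is (30em / 14), i.e. about 2.143em at the 14px base. The .fa-li.fa-lg offset works out to -@fa-li-width + 4em/14 = -26em/14 ≈ -1.857em, nudging enlarged bullets right so they stay flush with the text edge. Compiled, approximately:

    .fa-ul { padding-left: 0; margin-left: 2.14285714em; list-style-type: none; }
    .fa-li { position: absolute; left: -2.14285714em; width: 2.14285714em; top: 0.14285714em; text-align: center; }
    .fa-li.fa-lg { left: -1.85714286em; }
)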
diff --git a/asset/static/fonts/less/mixins.less b/asset/static/fonts/less/mixins.less
new file mode 100755
index 0000000..d5a43a1
--- /dev/null
+++ b/asset/static/fonts/less/mixins.less
@@ -0,0 +1,26 @@
+// Mixins
+// --------------------------
+
+.fa-icon() {
+ display: inline-block;
+ font: normal normal normal @fa-font-size-base/@fa-line-height-base FontAwesome; // shortening font declaration
+ font-size: inherit; // can't have font-size inherit on line above, so need to override
+ text-rendering: auto; // optimizelegibility throws things off #1094
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+
+}
+
+.fa-icon-rotate(@degrees, @rotation) {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation);
+ -webkit-transform: rotate(@degrees);
+ -ms-transform: rotate(@degrees);
+ transform: rotate(@degrees);
+}
+
+.fa-icon-flip(@horiz, @vert, @rotation) {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation, mirror=1);
+ -webkit-transform: scale(@horiz, @vert);
+ -ms-transform: scale(@horiz, @vert);
+ transform: scale(@horiz, @vert);
+}
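(Reference note: the mixins take both @degrees and @rotation because the legacy IE filter only accepts quarter-turn steps — BasicImage(rotation=N) turns the element N × 90°, while modern engines take the real angle via transform. The pairs passed in rotated-flipped.less below therefore have to agree; for example:

    // .fa-icon-rotate(90deg, 1) expands to roughly:
    .fa-rotate-90 {
      filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
      -webkit-transform: rotate(90deg);
      -ms-transform: rotate(90deg);
      transform: rotate(90deg);
    }

In .fa-icon-flip, mirror=1 performs the reflection for old IE; a vertical flip is a mirror plus a half turn, which is why flip-vertical passes rotation=2.)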
diff --git a/asset/static/fonts/less/path.less b/asset/static/fonts/less/path.less
new file mode 100755
index 0000000..9211e66
--- /dev/null
+++ b/asset/static/fonts/less/path.less
@@ -0,0 +1,15 @@
+/* FONT PATH
+ * -------------------------- */
+
+@font-face {
+ font-family: 'FontAwesome';
+ src: url('@{fa-font-path}/fontawesome-webfont.eot?v=@{fa-version}');
+ src: url('@{fa-font-path}/fontawesome-webfont.eot?#iefix&v=@{fa-version}') format('embedded-opentype'),
+ url('@{fa-font-path}/fontawesome-webfont.woff2?v=@{fa-version}') format('woff2'),
+ url('@{fa-font-path}/fontawesome-webfont.woff?v=@{fa-version}') format('woff'),
+ url('@{fa-font-path}/fontawesome-webfont.ttf?v=@{fa-version}') format('truetype'),
+ url('@{fa-font-path}/fontawesome-webfont.svg?v=@{fa-version}#fontawesomeregular') format('svg');
+// src: url('@{fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
+ font-weight: normal;
+ font-style: normal;
+}
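(Reference note: this is the usual "bulletproof" @font-face pattern — the first src line feeds IE8 its EOT file, and the ?#iefix fragment on the second declaration stops old IE from choking on the multi-format list that follows. With the defaults from variables.less below (@fa-font-path: "../fonts", @fa-version: "4.5.0") the first two lines compile to roughly:

    src: url('../fonts/fontawesome-webfont.eot?v=4.5.0');
    src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.5.0') format('embedded-opentype'), ...

The v= query string is a cache-buster, so browsers refetch the font files whenever the vendored version changes.)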
diff --git a/asset/static/fonts/less/rotated-flipped.less b/asset/static/fonts/less/rotated-flipped.less
new file mode 100755
index 0000000..f6ba814
--- /dev/null
+++ b/asset/static/fonts/less/rotated-flipped.less
@@ -0,0 +1,20 @@
+// Rotated & Flipped Icons
+// -------------------------
+
+.@{fa-css-prefix}-rotate-90 { .fa-icon-rotate(90deg, 1); }
+.@{fa-css-prefix}-rotate-180 { .fa-icon-rotate(180deg, 2); }
+.@{fa-css-prefix}-rotate-270 { .fa-icon-rotate(270deg, 3); }
+
+.@{fa-css-prefix}-flip-horizontal { .fa-icon-flip(-1, 1, 0); }
+.@{fa-css-prefix}-flip-vertical { .fa-icon-flip(1, -1, 2); }
+
+// Hook for IE8-9
+// -------------------------
+
+:root .@{fa-css-prefix}-rotate-90,
+:root .@{fa-css-prefix}-rotate-180,
+:root .@{fa-css-prefix}-rotate-270,
+:root .@{fa-css-prefix}-flip-horizontal,
+:root .@{fa-css-prefix}-flip-vertical {
+ filter: none;
+}
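(Reference note: the :root guard works because IE8 never matches the :root selector while IE9+ does. IE9 understands -ms-transform, so without this block it would apply both the transform and the leftover filter and rotate the icon twice; resetting filter to none everywhere except IE8 leaves exactly one mechanism active per engine:

    /* effective result per engine, roughly:
       IE8:   filter rotation only (:root never matches)
       IE9:   -ms-transform, filter reset to none by the :root rule
       rest:  plain transform */
)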
diff --git a/asset/static/fonts/less/spinning.less b/asset/static/fonts/less/spinning.less
new file mode 100755
index 0000000..6e1564e
--- /dev/null
+++ b/asset/static/fonts/less/spinning.less
@@ -0,0 +1,29 @@
+// Spinning Icons
+// --------------------------
+
+.@{fa-css-prefix}-spin {
+ -webkit-animation: fa-spin 2s infinite linear;
+ animation: fa-spin 2s infinite linear;
+}
+
+@-webkit-keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
+
+@keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
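(Reference note: the animation ends at 359deg rather than 360deg, presumably to sidestep engines that optimize rotate(360deg) away as equal to rotate(0deg); the missing degree per 2-second cycle is imperceptible. Because the fa-spin keyframes are global, they can be reused outside the stock class — an illustrative sketch, where .sync-indicator and the icon choice are hypothetical:

    .sync-indicator:before {
      .fa-icon();                                   // base icon-font setup from mixins.less
      content: @fa-var-refresh;                     // "\f021", from variables.less
      -webkit-animation: fa-spin 2s infinite linear;
      animation: fa-spin 2s infinite linear;
    }
)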
diff --git a/asset/static/fonts/less/stacked.less b/asset/static/fonts/less/stacked.less
new file mode 100755
index 0000000..fc53fb0
--- /dev/null
+++ b/asset/static/fonts/less/stacked.less
@@ -0,0 +1,20 @@
+// Stacked Icons
+// -------------------------
+
+.@{fa-css-prefix}-stack {
+ position: relative;
+ display: inline-block;
+ width: 2em;
+ height: 2em;
+ line-height: 2em;
+ vertical-align: middle;
+}
+.@{fa-css-prefix}-stack-1x, .@{fa-css-prefix}-stack-2x {
+ position: absolute;
+ left: 0;
+ width: 100%;
+ text-align: center;
+}
+.@{fa-css-prefix}-stack-1x { line-height: inherit; }
+.@{fa-css-prefix}-stack-2x { font-size: 2em; }
+.@{fa-css-prefix}-inverse { color: @fa-inverse; }
diff --git a/asset/static/fonts/less/variables.less b/asset/static/fonts/less/variables.less
new file mode 100755
index 0000000..37c4b80
--- /dev/null
+++ b/asset/static/fonts/less/variables.less
@@ -0,0 +1,708 @@
+// Variables
+// --------------------------
+
+@fa-font-path: "../fonts";
+@fa-font-size-base: 14px;
+@fa-line-height-base: 1;
+//@fa-font-path: "//netdna.bootstrapcdn.com/font-awesome/4.5.0/fonts"; // for referencing Bootstrap CDN font files directly
+@fa-css-prefix: fa;
+@fa-version: "4.5.0";
+@fa-border-color: #eee;
+@fa-inverse: #fff;
+@fa-li-width: (30em / 14);
+
+@fa-var-500px: "\f26e";
+@fa-var-adjust: "\f042";
+@fa-var-adn: "\f170";
+@fa-var-align-center: "\f037";
+@fa-var-align-justify: "\f039";
+@fa-var-align-left: "\f036";
+@fa-var-align-right: "\f038";
+@fa-var-amazon: "\f270";
+@fa-var-ambulance: "\f0f9";
+@fa-var-anchor: "\f13d";
+@fa-var-android: "\f17b";
+@fa-var-angellist: "\f209";
+@fa-var-angle-double-down: "\f103";
+@fa-var-angle-double-left: "\f100";
+@fa-var-angle-double-right: "\f101";
+@fa-var-angle-double-up: "\f102";
+@fa-var-angle-down: "\f107";
+@fa-var-angle-left: "\f104";
+@fa-var-angle-right: "\f105";
+@fa-var-angle-up: "\f106";
+@fa-var-apple: "\f179";
+@fa-var-archive: "\f187";
+@fa-var-area-chart: "\f1fe";
+@fa-var-arrow-circle-down: "\f0ab";
+@fa-var-arrow-circle-left: "\f0a8";
+@fa-var-arrow-circle-o-down: "\f01a";
+@fa-var-arrow-circle-o-left: "\f190";
+@fa-var-arrow-circle-o-right: "\f18e";
+@fa-var-arrow-circle-o-up: "\f01b";
+@fa-var-arrow-circle-right: "\f0a9";
+@fa-var-arrow-circle-up: "\f0aa";
+@fa-var-arrow-down: "\f063";
+@fa-var-arrow-left: "\f060";
+@fa-var-arrow-right: "\f061";
+@fa-var-arrow-up: "\f062";
+@fa-var-arrows: "\f047";
+@fa-var-arrows-alt: "\f0b2";
+@fa-var-arrows-h: "\f07e";
+@fa-var-arrows-v: "\f07d";
+@fa-var-asterisk: "\f069";
+@fa-var-at: "\f1fa";
+@fa-var-automobile: "\f1b9";
+@fa-var-backward: "\f04a";
+@fa-var-balance-scale: "\f24e";
+@fa-var-ban: "\f05e";
+@fa-var-bank: "\f19c";
+@fa-var-bar-chart: "\f080";
+@fa-var-bar-chart-o: "\f080";
+@fa-var-barcode: "\f02a";
+@fa-var-bars: "\f0c9";
+@fa-var-battery-0: "\f244";
+@fa-var-battery-1: "\f243";
+@fa-var-battery-2: "\f242";
+@fa-var-battery-3: "\f241";
+@fa-var-battery-4: "\f240";
+@fa-var-battery-empty: "\f244";
+@fa-var-battery-full: "\f240";
+@fa-var-battery-half: "\f242";
+@fa-var-battery-quarter: "\f243";
+@fa-var-battery-three-quarters: "\f241";
+@fa-var-bed: "\f236";
+@fa-var-beer: "\f0fc";
+@fa-var-behance: "\f1b4";
+@fa-var-behance-square: "\f1b5";
+@fa-var-bell: "\f0f3";
+@fa-var-bell-o: "\f0a2";
+@fa-var-bell-slash: "\f1f6";
+@fa-var-bell-slash-o: "\f1f7";
+@fa-var-bicycle: "\f206";
+@fa-var-binoculars: "\f1e5";
+@fa-var-birthday-cake: "\f1fd";
+@fa-var-bitbucket: "\f171";
+@fa-var-bitbucket-square: "\f172";
+@fa-var-bitcoin: "\f15a";
+@fa-var-black-tie: "\f27e";
+@fa-var-bluetooth: "\f293";
+@fa-var-bluetooth-b: "\f294";
+@fa-var-bold: "\f032";
+@fa-var-bolt: "\f0e7";
+@fa-var-bomb: "\f1e2";
+@fa-var-book: "\f02d";
+@fa-var-bookmark: "\f02e";
+@fa-var-bookmark-o: "\f097";
+@fa-var-briefcase: "\f0b1";
+@fa-var-btc: "\f15a";
+@fa-var-bug: "\f188";
+@fa-var-building: "\f1ad";
+@fa-var-building-o: "\f0f7";
+@fa-var-bullhorn: "\f0a1";
+@fa-var-bullseye: "\f140";
+@fa-var-bus: "\f207";
+@fa-var-buysellads: "\f20d";
+@fa-var-cab: "\f1ba";
+@fa-var-calculator: "\f1ec";
+@fa-var-calendar: "\f073";
+@fa-var-calendar-check-o: "\f274";
+@fa-var-calendar-minus-o: "\f272";
+@fa-var-calendar-o: "\f133";
+@fa-var-calendar-plus-o: "\f271";
+@fa-var-calendar-times-o: "\f273";
+@fa-var-camera: "\f030";
+@fa-var-camera-retro: "\f083";
+@fa-var-car: "\f1b9";
+@fa-var-caret-down: "\f0d7";
+@fa-var-caret-left: "\f0d9";
+@fa-var-caret-right: "\f0da";
+@fa-var-caret-square-o-down: "\f150";
+@fa-var-caret-square-o-left: "\f191";
+@fa-var-caret-square-o-right: "\f152";
+@fa-var-caret-square-o-up: "\f151";
+@fa-var-caret-up: "\f0d8";
+@fa-var-cart-arrow-down: "\f218";
+@fa-var-cart-plus: "\f217";
+@fa-var-cc: "\f20a";
+@fa-var-cc-amex: "\f1f3";
+@fa-var-cc-diners-club: "\f24c";
+@fa-var-cc-discover: "\f1f2";
+@fa-var-cc-jcb: "\f24b";
+@fa-var-cc-mastercard: "\f1f1";
+@fa-var-cc-paypal: "\f1f4";
+@fa-var-cc-stripe: "\f1f5";
+@fa-var-cc-visa: "\f1f0";
+@fa-var-certificate: "\f0a3";
+@fa-var-chain: "\f0c1";
+@fa-var-chain-broken: "\f127";
+@fa-var-check: "\f00c";
+@fa-var-check-circle: "\f058";
+@fa-var-check-circle-o: "\f05d";
+@fa-var-check-square: "\f14a";
+@fa-var-check-square-o: "\f046";
+@fa-var-chevron-circle-down: "\f13a";
+@fa-var-chevron-circle-left: "\f137";
+@fa-var-chevron-circle-right: "\f138";
+@fa-var-chevron-circle-up: "\f139";
+@fa-var-chevron-down: "\f078";
+@fa-var-chevron-left: "\f053";
+@fa-var-chevron-right: "\f054";
+@fa-var-chevron-up: "\f077";
+@fa-var-child: "\f1ae";
+@fa-var-chrome: "\f268";
+@fa-var-circle: "\f111";
+@fa-var-circle-o: "\f10c";
+@fa-var-circle-o-notch: "\f1ce";
+@fa-var-circle-thin: "\f1db";
+@fa-var-clipboard: "\f0ea";
+@fa-var-clock-o: "\f017";
+@fa-var-clone: "\f24d";
+@fa-var-close: "\f00d";
+@fa-var-cloud: "\f0c2";
+@fa-var-cloud-download: "\f0ed";
+@fa-var-cloud-upload: "\f0ee";
+@fa-var-cny: "\f157";
+@fa-var-code: "\f121";
+@fa-var-code-fork: "\f126";
+@fa-var-codepen: "\f1cb";
+@fa-var-codiepie: "\f284";
+@fa-var-coffee: "\f0f4";
+@fa-var-cog: "\f013";
+@fa-var-cogs: "\f085";
+@fa-var-columns: "\f0db";
+@fa-var-comment: "\f075";
+@fa-var-comment-o: "\f0e5";
+@fa-var-commenting: "\f27a";
+@fa-var-commenting-o: "\f27b";
+@fa-var-comments: "\f086";
+@fa-var-comments-o: "\f0e6";
+@fa-var-compass: "\f14e";
+@fa-var-compress: "\f066";
+@fa-var-connectdevelop: "\f20e";
+@fa-var-contao: "\f26d";
+@fa-var-copy: "\f0c5";
+@fa-var-copyright: "\f1f9";
+@fa-var-creative-commons: "\f25e";
+@fa-var-credit-card: "\f09d";
+@fa-var-credit-card-alt: "\f283";
+@fa-var-crop: "\f125";
+@fa-var-crosshairs: "\f05b";
+@fa-var-css3: "\f13c";
+@fa-var-cube: "\f1b2";
+@fa-var-cubes: "\f1b3";
+@fa-var-cut: "\f0c4";
+@fa-var-cutlery: "\f0f5";
+@fa-var-dashboard: "\f0e4";
+@fa-var-dashcube: "\f210";
+@fa-var-database: "\f1c0";
+@fa-var-dedent: "\f03b";
+@fa-var-delicious: "\f1a5";
+@fa-var-desktop: "\f108";
+@fa-var-deviantart: "\f1bd";
+@fa-var-diamond: "\f219";
+@fa-var-digg: "\f1a6";
+@fa-var-dollar: "\f155";
+@fa-var-dot-circle-o: "\f192";
+@fa-var-download: "\f019";
+@fa-var-dribbble: "\f17d";
+@fa-var-dropbox: "\f16b";
+@fa-var-drupal: "\f1a9";
+@fa-var-edge: "\f282";
+@fa-var-edit: "\f044";
+@fa-var-eject: "\f052";
+@fa-var-ellipsis-h: "\f141";
+@fa-var-ellipsis-v: "\f142";
+@fa-var-empire: "\f1d1";
+@fa-var-envelope: "\f0e0";
+@fa-var-envelope-o: "\f003";
+@fa-var-envelope-square: "\f199";
+@fa-var-eraser: "\f12d";
+@fa-var-eur: "\f153";
+@fa-var-euro: "\f153";
+@fa-var-exchange: "\f0ec";
+@fa-var-exclamation: "\f12a";
+@fa-var-exclamation-circle: "\f06a";
+@fa-var-exclamation-triangle: "\f071";
+@fa-var-expand: "\f065";
+@fa-var-expeditedssl: "\f23e";
+@fa-var-external-link: "\f08e";
+@fa-var-external-link-square: "\f14c";
+@fa-var-eye: "\f06e";
+@fa-var-eye-slash: "\f070";
+@fa-var-eyedropper: "\f1fb";
+@fa-var-facebook: "\f09a";
+@fa-var-facebook-f: "\f09a";
+@fa-var-facebook-official: "\f230";
+@fa-var-facebook-square: "\f082";
+@fa-var-fast-backward: "\f049";
+@fa-var-fast-forward: "\f050";
+@fa-var-fax: "\f1ac";
+@fa-var-feed: "\f09e";
+@fa-var-female: "\f182";
+@fa-var-fighter-jet: "\f0fb";
+@fa-var-file: "\f15b";
+@fa-var-file-archive-o: "\f1c6";
+@fa-var-file-audio-o: "\f1c7";
+@fa-var-file-code-o: "\f1c9";
+@fa-var-file-excel-o: "\f1c3";
+@fa-var-file-image-o: "\f1c5";
+@fa-var-file-movie-o: "\f1c8";
+@fa-var-file-o: "\f016";
+@fa-var-file-pdf-o: "\f1c1";
+@fa-var-file-photo-o: "\f1c5";
+@fa-var-file-picture-o: "\f1c5";
+@fa-var-file-powerpoint-o: "\f1c4";
+@fa-var-file-sound-o: "\f1c7";
+@fa-var-file-text: "\f15c";
+@fa-var-file-text-o: "\f0f6";
+@fa-var-file-video-o: "\f1c8";
+@fa-var-file-word-o: "\f1c2";
+@fa-var-file-zip-o: "\f1c6";
+@fa-var-files-o: "\f0c5";
+@fa-var-film: "\f008";
+@fa-var-filter: "\f0b0";
+@fa-var-fire: "\f06d";
+@fa-var-fire-extinguisher: "\f134";
+@fa-var-firefox: "\f269";
+@fa-var-flag: "\f024";
+@fa-var-flag-checkered: "\f11e";
+@fa-var-flag-o: "\f11d";
+@fa-var-flash: "\f0e7";
+@fa-var-flask: "\f0c3";
+@fa-var-flickr: "\f16e";
+@fa-var-floppy-o: "\f0c7";
+@fa-var-folder: "\f07b";
+@fa-var-folder-o: "\f114";
+@fa-var-folder-open: "\f07c";
+@fa-var-folder-open-o: "\f115";
+@fa-var-font: "\f031";
+@fa-var-fonticons: "\f280";
+@fa-var-fort-awesome: "\f286";
+@fa-var-forumbee: "\f211";
+@fa-var-forward: "\f04e";
+@fa-var-foursquare: "\f180";
+@fa-var-frown-o: "\f119";
+@fa-var-futbol-o: "\f1e3";
+@fa-var-gamepad: "\f11b";
+@fa-var-gavel: "\f0e3";
+@fa-var-gbp: "\f154";
+@fa-var-ge: "\f1d1";
+@fa-var-gear: "\f013";
+@fa-var-gears: "\f085";
+@fa-var-genderless: "\f22d";
+@fa-var-get-pocket: "\f265";
+@fa-var-gg: "\f260";
+@fa-var-gg-circle: "\f261";
+@fa-var-gift: "\f06b";
+@fa-var-git: "\f1d3";
+@fa-var-git-square: "\f1d2";
+@fa-var-github: "\f09b";
+@fa-var-github-alt: "\f113";
+@fa-var-github-square: "\f092";
+@fa-var-gittip: "\f184";
+@fa-var-glass: "\f000";
+@fa-var-globe: "\f0ac";
+@fa-var-google: "\f1a0";
+@fa-var-google-plus: "\f0d5";
+@fa-var-google-plus-square: "\f0d4";
+@fa-var-google-wallet: "\f1ee";
+@fa-var-graduation-cap: "\f19d";
+@fa-var-gratipay: "\f184";
+@fa-var-group: "\f0c0";
+@fa-var-h-square: "\f0fd";
+@fa-var-hacker-news: "\f1d4";
+@fa-var-hand-grab-o: "\f255";
+@fa-var-hand-lizard-o: "\f258";
+@fa-var-hand-o-down: "\f0a7";
+@fa-var-hand-o-left: "\f0a5";
+@fa-var-hand-o-right: "\f0a4";
+@fa-var-hand-o-up: "\f0a6";
+@fa-var-hand-paper-o: "\f256";
+@fa-var-hand-peace-o: "\f25b";
+@fa-var-hand-pointer-o: "\f25a";
+@fa-var-hand-rock-o: "\f255";
+@fa-var-hand-scissors-o: "\f257";
+@fa-var-hand-spock-o: "\f259";
+@fa-var-hand-stop-o: "\f256";
+@fa-var-hashtag: "\f292";
+@fa-var-hdd-o: "\f0a0";
+@fa-var-header: "\f1dc";
+@fa-var-headphones: "\f025";
+@fa-var-heart: "\f004";
+@fa-var-heart-o: "\f08a";
+@fa-var-heartbeat: "\f21e";
+@fa-var-history: "\f1da";
+@fa-var-home: "\f015";
+@fa-var-hospital-o: "\f0f8";
+@fa-var-hotel: "\f236";
+@fa-var-hourglass: "\f254";
+@fa-var-hourglass-1: "\f251";
+@fa-var-hourglass-2: "\f252";
+@fa-var-hourglass-3: "\f253";
+@fa-var-hourglass-end: "\f253";
+@fa-var-hourglass-half: "\f252";
+@fa-var-hourglass-o: "\f250";
+@fa-var-hourglass-start: "\f251";
+@fa-var-houzz: "\f27c";
+@fa-var-html5: "\f13b";
+@fa-var-i-cursor: "\f246";
+@fa-var-ils: "\f20b";
+@fa-var-image: "\f03e";
+@fa-var-inbox: "\f01c";
+@fa-var-indent: "\f03c";
+@fa-var-industry: "\f275";
+@fa-var-info: "\f129";
+@fa-var-info-circle: "\f05a";
+@fa-var-inr: "\f156";
+@fa-var-instagram: "\f16d";
+@fa-var-institution: "\f19c";
+@fa-var-internet-explorer: "\f26b";
+@fa-var-intersex: "\f224";
+@fa-var-ioxhost: "\f208";
+@fa-var-italic: "\f033";
+@fa-var-joomla: "\f1aa";
+@fa-var-jpy: "\f157";
+@fa-var-jsfiddle: "\f1cc";
+@fa-var-key: "\f084";
+@fa-var-keyboard-o: "\f11c";
+@fa-var-krw: "\f159";
+@fa-var-language: "\f1ab";
+@fa-var-laptop: "\f109";
+@fa-var-lastfm: "\f202";
+@fa-var-lastfm-square: "\f203";
+@fa-var-leaf: "\f06c";
+@fa-var-leanpub: "\f212";
+@fa-var-legal: "\f0e3";
+@fa-var-lemon-o: "\f094";
+@fa-var-level-down: "\f149";
+@fa-var-level-up: "\f148";
+@fa-var-life-bouy: "\f1cd";
+@fa-var-life-buoy: "\f1cd";
+@fa-var-life-ring: "\f1cd";
+@fa-var-life-saver: "\f1cd";
+@fa-var-lightbulb-o: "\f0eb";
+@fa-var-line-chart: "\f201";
+@fa-var-link: "\f0c1";
+@fa-var-linkedin: "\f0e1";
+@fa-var-linkedin-square: "\f08c";
+@fa-var-linux: "\f17c";
+@fa-var-list: "\f03a";
+@fa-var-list-alt: "\f022";
+@fa-var-list-ol: "\f0cb";
+@fa-var-list-ul: "\f0ca";
+@fa-var-location-arrow: "\f124";
+@fa-var-lock: "\f023";
+@fa-var-long-arrow-down: "\f175";
+@fa-var-long-arrow-left: "\f177";
+@fa-var-long-arrow-right: "\f178";
+@fa-var-long-arrow-up: "\f176";
+@fa-var-magic: "\f0d0";
+@fa-var-magnet: "\f076";
+@fa-var-mail-forward: "\f064";
+@fa-var-mail-reply: "\f112";
+@fa-var-mail-reply-all: "\f122";
+@fa-var-male: "\f183";
+@fa-var-map: "\f279";
+@fa-var-map-marker: "\f041";
+@fa-var-map-o: "\f278";
+@fa-var-map-pin: "\f276";
+@fa-var-map-signs: "\f277";
+@fa-var-mars: "\f222";
+@fa-var-mars-double: "\f227";
+@fa-var-mars-stroke: "\f229";
+@fa-var-mars-stroke-h: "\f22b";
+@fa-var-mars-stroke-v: "\f22a";
+@fa-var-maxcdn: "\f136";
+@fa-var-meanpath: "\f20c";
+@fa-var-medium: "\f23a";
+@fa-var-medkit: "\f0fa";
+@fa-var-meh-o: "\f11a";
+@fa-var-mercury: "\f223";
+@fa-var-microphone: "\f130";
+@fa-var-microphone-slash: "\f131";
+@fa-var-minus: "\f068";
+@fa-var-minus-circle: "\f056";
+@fa-var-minus-square: "\f146";
+@fa-var-minus-square-o: "\f147";
+@fa-var-mixcloud: "\f289";
+@fa-var-mobile: "\f10b";
+@fa-var-mobile-phone: "\f10b";
+@fa-var-modx: "\f285";
+@fa-var-money: "\f0d6";
+@fa-var-moon-o: "\f186";
+@fa-var-mortar-board: "\f19d";
+@fa-var-motorcycle: "\f21c";
+@fa-var-mouse-pointer: "\f245";
+@fa-var-music: "\f001";
+@fa-var-navicon: "\f0c9";
+@fa-var-neuter: "\f22c";
+@fa-var-newspaper-o: "\f1ea";
+@fa-var-object-group: "\f247";
+@fa-var-object-ungroup: "\f248";
+@fa-var-odnoklassniki: "\f263";
+@fa-var-odnoklassniki-square: "\f264";
+@fa-var-opencart: "\f23d";
+@fa-var-openid: "\f19b";
+@fa-var-opera: "\f26a";
+@fa-var-optin-monster: "\f23c";
+@fa-var-outdent: "\f03b";
+@fa-var-pagelines: "\f18c";
+@fa-var-paint-brush: "\f1fc";
+@fa-var-paper-plane: "\f1d8";
+@fa-var-paper-plane-o: "\f1d9";
+@fa-var-paperclip: "\f0c6";
+@fa-var-paragraph: "\f1dd";
+@fa-var-paste: "\f0ea";
+@fa-var-pause: "\f04c";
+@fa-var-pause-circle: "\f28b";
+@fa-var-pause-circle-o: "\f28c";
+@fa-var-paw: "\f1b0";
+@fa-var-paypal: "\f1ed";
+@fa-var-pencil: "\f040";
+@fa-var-pencil-square: "\f14b";
+@fa-var-pencil-square-o: "\f044";
+@fa-var-percent: "\f295";
+@fa-var-phone: "\f095";
+@fa-var-phone-square: "\f098";
+@fa-var-photo: "\f03e";
+@fa-var-picture-o: "\f03e";
+@fa-var-pie-chart: "\f200";
+@fa-var-pied-piper: "\f1a7";
+@fa-var-pied-piper-alt: "\f1a8";
+@fa-var-pinterest: "\f0d2";
+@fa-var-pinterest-p: "\f231";
+@fa-var-pinterest-square: "\f0d3";
+@fa-var-plane: "\f072";
+@fa-var-play: "\f04b";
+@fa-var-play-circle: "\f144";
+@fa-var-play-circle-o: "\f01d";
+@fa-var-plug: "\f1e6";
+@fa-var-plus: "\f067";
+@fa-var-plus-circle: "\f055";
+@fa-var-plus-square: "\f0fe";
+@fa-var-plus-square-o: "\f196";
+@fa-var-power-off: "\f011";
+@fa-var-print: "\f02f";
+@fa-var-product-hunt: "\f288";
+@fa-var-puzzle-piece: "\f12e";
+@fa-var-qq: "\f1d6";
+@fa-var-qrcode: "\f029";
+@fa-var-question: "\f128";
+@fa-var-question-circle: "\f059";
+@fa-var-quote-left: "\f10d";
+@fa-var-quote-right: "\f10e";
+@fa-var-ra: "\f1d0";
+@fa-var-random: "\f074";
+@fa-var-rebel: "\f1d0";
+@fa-var-recycle: "\f1b8";
+@fa-var-reddit: "\f1a1";
+@fa-var-reddit-alien: "\f281";
+@fa-var-reddit-square: "\f1a2";
+@fa-var-refresh: "\f021";
+@fa-var-registered: "\f25d";
+@fa-var-remove: "\f00d";
+@fa-var-renren: "\f18b";
+@fa-var-reorder: "\f0c9";
+@fa-var-repeat: "\f01e";
+@fa-var-reply: "\f112";
+@fa-var-reply-all: "\f122";
+@fa-var-retweet: "\f079";
+@fa-var-rmb: "\f157";
+@fa-var-road: "\f018";
+@fa-var-rocket: "\f135";
+@fa-var-rotate-left: "\f0e2";
+@fa-var-rotate-right: "\f01e";
+@fa-var-rouble: "\f158";
+@fa-var-rss: "\f09e";
+@fa-var-rss-square: "\f143";
+@fa-var-rub: "\f158";
+@fa-var-ruble: "\f158";
+@fa-var-rupee: "\f156";
+@fa-var-safari: "\f267";
+@fa-var-save: "\f0c7";
+@fa-var-scissors: "\f0c4";
+@fa-var-scribd: "\f28a";
+@fa-var-search: "\f002";
+@fa-var-search-minus: "\f010";
+@fa-var-search-plus: "\f00e";
+@fa-var-sellsy: "\f213";
+@fa-var-send: "\f1d8";
+@fa-var-send-o: "\f1d9";
+@fa-var-server: "\f233";
+@fa-var-share: "\f064";
+@fa-var-share-alt: "\f1e0";
+@fa-var-share-alt-square: "\f1e1";
+@fa-var-share-square: "\f14d";
+@fa-var-share-square-o: "\f045";
+@fa-var-shekel: "\f20b";
+@fa-var-sheqel: "\f20b";
+@fa-var-shield: "\f132";
+@fa-var-ship: "\f21a";
+@fa-var-shirtsinbulk: "\f214";
+@fa-var-shopping-bag: "\f290";
+@fa-var-shopping-basket: "\f291";
+@fa-var-shopping-cart: "\f07a";
+@fa-var-sign-in: "\f090";
+@fa-var-sign-out: "\f08b";
+@fa-var-signal: "\f012";
+@fa-var-simplybuilt: "\f215";
+@fa-var-sitemap: "\f0e8";
+@fa-var-skyatlas: "\f216";
+@fa-var-skype: "\f17e";
+@fa-var-slack: "\f198";
+@fa-var-sliders: "\f1de";
+@fa-var-slideshare: "\f1e7";
+@fa-var-smile-o: "\f118";
+@fa-var-soccer-ball-o: "\f1e3";
+@fa-var-sort: "\f0dc";
+@fa-var-sort-alpha-asc: "\f15d";
+@fa-var-sort-alpha-desc: "\f15e";
+@fa-var-sort-amount-asc: "\f160";
+@fa-var-sort-amount-desc: "\f161";
+@fa-var-sort-asc: "\f0de";
+@fa-var-sort-desc: "\f0dd";
+@fa-var-sort-down: "\f0dd";
+@fa-var-sort-numeric-asc: "\f162";
+@fa-var-sort-numeric-desc: "\f163";
+@fa-var-sort-up: "\f0de";
+@fa-var-soundcloud: "\f1be";
+@fa-var-space-shuttle: "\f197";
+@fa-var-spinner: "\f110";
+@fa-var-spoon: "\f1b1";
+@fa-var-spotify: "\f1bc";
+@fa-var-square: "\f0c8";
+@fa-var-square-o: "\f096";
+@fa-var-stack-exchange: "\f18d";
+@fa-var-stack-overflow: "\f16c";
+@fa-var-star: "\f005";
+@fa-var-star-half: "\f089";
+@fa-var-star-half-empty: "\f123";
+@fa-var-star-half-full: "\f123";
+@fa-var-star-half-o: "\f123";
+@fa-var-star-o: "\f006";
+@fa-var-steam: "\f1b6";
+@fa-var-steam-square: "\f1b7";
+@fa-var-step-backward: "\f048";
+@fa-var-step-forward: "\f051";
+@fa-var-stethoscope: "\f0f1";
+@fa-var-sticky-note: "\f249";
+@fa-var-sticky-note-o: "\f24a";
+@fa-var-stop: "\f04d";
+@fa-var-stop-circle: "\f28d";
+@fa-var-stop-circle-o: "\f28e";
+@fa-var-street-view: "\f21d";
+@fa-var-strikethrough: "\f0cc";
+@fa-var-stumbleupon: "\f1a4";
+@fa-var-stumbleupon-circle: "\f1a3";
+@fa-var-subscript: "\f12c";
+@fa-var-subway: "\f239";
+@fa-var-suitcase: "\f0f2";
+@fa-var-sun-o: "\f185";
+@fa-var-superscript: "\f12b";
+@fa-var-support: "\f1cd";
+@fa-var-table: "\f0ce";
+@fa-var-tablet: "\f10a";
+@fa-var-tachometer: "\f0e4";
+@fa-var-tag: "\f02b";
+@fa-var-tags: "\f02c";
+@fa-var-tasks: "\f0ae";
+@fa-var-taxi: "\f1ba";
+@fa-var-television: "\f26c";
+@fa-var-tencent-weibo: "\f1d5";
+@fa-var-terminal: "\f120";
+@fa-var-text-height: "\f034";
+@fa-var-text-width: "\f035";
+@fa-var-th: "\f00a";
+@fa-var-th-large: "\f009";
+@fa-var-th-list: "\f00b";
+@fa-var-thumb-tack: "\f08d";
+@fa-var-thumbs-down: "\f165";
+@fa-var-thumbs-o-down: "\f088";
+@fa-var-thumbs-o-up: "\f087";
+@fa-var-thumbs-up: "\f164";
+@fa-var-ticket: "\f145";
+@fa-var-times: "\f00d";
+@fa-var-times-circle: "\f057";
+@fa-var-times-circle-o: "\f05c";
+@fa-var-tint: "\f043";
+@fa-var-toggle-down: "\f150";
+@fa-var-toggle-left: "\f191";
+@fa-var-toggle-off: "\f204";
+@fa-var-toggle-on: "\f205";
+@fa-var-toggle-right: "\f152";
+@fa-var-toggle-up: "\f151";
+@fa-var-trademark: "\f25c";
+@fa-var-train: "\f238";
+@fa-var-transgender: "\f224";
+@fa-var-transgender-alt: "\f225";
+@fa-var-trash: "\f1f8";
+@fa-var-trash-o: "\f014";
+@fa-var-tree: "\f1bb";
+@fa-var-trello: "\f181";
+@fa-var-tripadvisor: "\f262";
+@fa-var-trophy: "\f091";
+@fa-var-truck: "\f0d1";
+@fa-var-try: "\f195";
+@fa-var-tty: "\f1e4";
+@fa-var-tumblr: "\f173";
+@fa-var-tumblr-square: "\f174";
+@fa-var-turkish-lira: "\f195";
+@fa-var-tv: "\f26c";
+@fa-var-twitch: "\f1e8";
+@fa-var-twitter: "\f099";
+@fa-var-twitter-square: "\f081";
+@fa-var-umbrella: "\f0e9";
+@fa-var-underline: "\f0cd";
+@fa-var-undo: "\f0e2";
+@fa-var-university: "\f19c";
+@fa-var-unlink: "\f127";
+@fa-var-unlock: "\f09c";
+@fa-var-unlock-alt: "\f13e";
+@fa-var-unsorted: "\f0dc";
+@fa-var-upload: "\f093";
+@fa-var-usb: "\f287";
+@fa-var-usd: "\f155";
+@fa-var-user: "\f007";
+@fa-var-user-md: "\f0f0";
+@fa-var-user-plus: "\f234";
+@fa-var-user-secret: "\f21b";
+@fa-var-user-times: "\f235";
+@fa-var-users: "\f0c0";
+@fa-var-venus: "\f221";
+@fa-var-venus-double: "\f226";
+@fa-var-venus-mars: "\f228";
+@fa-var-viacoin: "\f237";
+@fa-var-video-camera: "\f03d";
+@fa-var-vimeo: "\f27d";
+@fa-var-vimeo-square: "\f194";
+@fa-var-vine: "\f1ca";
+@fa-var-vk: "\f189";
+@fa-var-volume-down: "\f027";
+@fa-var-volume-off: "\f026";
+@fa-var-volume-up: "\f028";
+@fa-var-warning: "\f071";
+@fa-var-wechat: "\f1d7";
+@fa-var-weibo: "\f18a";
+@fa-var-weixin: "\f1d7";
+@fa-var-whatsapp: "\f232";
+@fa-var-wheelchair: "\f193";
+@fa-var-wifi: "\f1eb";
+@fa-var-wikipedia-w: "\f266";
+@fa-var-windows: "\f17a";
+@fa-var-won: "\f159";
+@fa-var-wordpress: "\f19a";
+@fa-var-wrench: "\f0ad";
+@fa-var-xing: "\f168";
+@fa-var-xing-square: "\f169";
+@fa-var-y-combinator: "\f23b";
+@fa-var-y-combinator-square: "\f1d4";
+@fa-var-yahoo: "\f19e";
+@fa-var-yc: "\f23b";
+@fa-var-yc-square: "\f1d4";
+@fa-var-yelp: "\f1e9";
+@fa-var-yen: "\f157";
+@fa-var-youtube: "\f167";
+@fa-var-youtube-play: "\f16a";
+@fa-var-youtube-square: "\f166";
+
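(Reference note: every codepoint sits in the \f000–\f295 block of the Unicode Private Use Area, so screen readers skip the glyphs — the _icons.scss header below makes the same point. Many names are deliberate aliases sharing one codepoint (automobile/car are both "\f1b9", gittip/gratipay both "\f184"), which is why icons.less chains several selectors onto a single content rule. The variables also make it easy to attach a glyph to a project-specific class without the stock markup — illustrative only, .wiki-edit-link being a hypothetical name:

    .wiki-edit-link:before {
      .fa-icon();                // from mixins.less
      content: @fa-var-pencil;   // "\f040"
      margin-right: .3em;
    }
)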
diff --git a/asset/static/fonts/scss/_animated.scss b/asset/static/fonts/scss/_animated.scss
new file mode 100755
index 0000000..8a020db
--- /dev/null
+++ b/asset/static/fonts/scss/_animated.scss
@@ -0,0 +1,34 @@
+// Spinning Icons
+// --------------------------
+
+.#{$fa-css-prefix}-spin {
+ -webkit-animation: fa-spin 2s infinite linear;
+ animation: fa-spin 2s infinite linear;
+}
+
+.#{$fa-css-prefix}-pulse {
+ -webkit-animation: fa-spin 1s infinite steps(8);
+ animation: fa-spin 1s infinite steps(8);
+}
+
+@-webkit-keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
+
+@keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
diff --git a/asset/static/fonts/scss/_bordered-pulled.scss b/asset/static/fonts/scss/_bordered-pulled.scss
new file mode 100755
index 0000000..d4b85a0
--- /dev/null
+++ b/asset/static/fonts/scss/_bordered-pulled.scss
@@ -0,0 +1,25 @@
+// Bordered & Pulled
+// -------------------------
+
+.#{$fa-css-prefix}-border {
+ padding: .2em .25em .15em;
+ border: solid .08em $fa-border-color;
+ border-radius: .1em;
+}
+
+.#{$fa-css-prefix}-pull-left { float: left; }
+.#{$fa-css-prefix}-pull-right { float: right; }
+
+.#{$fa-css-prefix} {
+ &.#{$fa-css-prefix}-pull-left { margin-right: .3em; }
+ &.#{$fa-css-prefix}-pull-right { margin-left: .3em; }
+}
+
+/* Deprecated as of 4.4.0 */
+.pull-right { float: right; }
+.pull-left { float: left; }
+
+.#{$fa-css-prefix} {
+ &.pull-left { margin-right: .3em; }
+ &.pull-right { margin-left: .3em; }
+}
diff --git a/asset/static/fonts/scss/_core.scss b/asset/static/fonts/scss/_core.scss
new file mode 100755
index 0000000..7425ef8
--- /dev/null
+++ b/asset/static/fonts/scss/_core.scss
@@ -0,0 +1,12 @@
+// Base Class Definition
+// -------------------------
+
+.#{$fa-css-prefix} {
+ display: inline-block;
+ font: normal normal normal #{$fa-font-size-base}/#{$fa-line-height-base} FontAwesome; // shortening font declaration
+ font-size: inherit; // can't have font-size inherit on line above, so need to override
+ text-rendering: auto; // optimizelegibility throws things off #1094
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+
+}
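(Reference note: the scss/ tree largely mirrors the less/ tree file for file; the systematic difference is syntax — Sass interpolates with #{...} and prefixes variables with $, where LESS uses @{...} and @. Assuming the SCSS variables carry the same 14px/1 defaults as variables.less above, both ports compile to identical CSS:

    // LESS: .@{fa-css-prefix}, @fa-font-size-base
    // SCSS: .#{$fa-css-prefix}, $fa-font-size-base
    .fa {
      display: inline-block;
      font: normal normal normal 14px/1 FontAwesome;
      font-size: inherit;
      text-rendering: auto;
      -webkit-font-smoothing: antialiased;
      -moz-osx-font-smoothing: grayscale;
    }
)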
diff --git a/asset/static/fonts/scss/_fixed-width.scss b/asset/static/fonts/scss/_fixed-width.scss
new file mode 100755
index 0000000..b221c98
--- /dev/null
+++ b/asset/static/fonts/scss/_fixed-width.scss
@@ -0,0 +1,6 @@
+// Fixed Width Icons
+// -------------------------
+.#{$fa-css-prefix}-fw {
+ width: (18em / 14);
+ text-align: center;
+}
diff --git a/asset/static/fonts/scss/_icons.scss b/asset/static/fonts/scss/_icons.scss
new file mode 100755
index 0000000..6f93759
--- /dev/null
+++ b/asset/static/fonts/scss/_icons.scss
@@ -0,0 +1,697 @@
+/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
+ readers do not read off random characters that represent icons */
+
+.#{$fa-css-prefix}-glass:before { content: $fa-var-glass; }
+.#{$fa-css-prefix}-music:before { content: $fa-var-music; }
+.#{$fa-css-prefix}-search:before { content: $fa-var-search; }
+.#{$fa-css-prefix}-envelope-o:before { content: $fa-var-envelope-o; }
+.#{$fa-css-prefix}-heart:before { content: $fa-var-heart; }
+.#{$fa-css-prefix}-star:before { content: $fa-var-star; }
+.#{$fa-css-prefix}-star-o:before { content: $fa-var-star-o; }
+.#{$fa-css-prefix}-user:before { content: $fa-var-user; }
+.#{$fa-css-prefix}-film:before { content: $fa-var-film; }
+.#{$fa-css-prefix}-th-large:before { content: $fa-var-th-large; }
+.#{$fa-css-prefix}-th:before { content: $fa-var-th; }
+.#{$fa-css-prefix}-th-list:before { content: $fa-var-th-list; }
+.#{$fa-css-prefix}-check:before { content: $fa-var-check; }
+.#{$fa-css-prefix}-remove:before,
+.#{$fa-css-prefix}-close:before,
+.#{$fa-css-prefix}-times:before { content: $fa-var-times; }
+.#{$fa-css-prefix}-search-plus:before { content: $fa-var-search-plus; }
+.#{$fa-css-prefix}-search-minus:before { content: $fa-var-search-minus; }
+.#{$fa-css-prefix}-power-off:before { content: $fa-var-power-off; }
+.#{$fa-css-prefix}-signal:before { content: $fa-var-signal; }
+.#{$fa-css-prefix}-gear:before,
+.#{$fa-css-prefix}-cog:before { content: $fa-var-cog; }
+.#{$fa-css-prefix}-trash-o:before { content: $fa-var-trash-o; }
+.#{$fa-css-prefix}-home:before { content: $fa-var-home; }
+.#{$fa-css-prefix}-file-o:before { content: $fa-var-file-o; }
+.#{$fa-css-prefix}-clock-o:before { content: $fa-var-clock-o; }
+.#{$fa-css-prefix}-road:before { content: $fa-var-road; }
+.#{$fa-css-prefix}-download:before { content: $fa-var-download; }
+.#{$fa-css-prefix}-arrow-circle-o-down:before { content: $fa-var-arrow-circle-o-down; }
+.#{$fa-css-prefix}-arrow-circle-o-up:before { content: $fa-var-arrow-circle-o-up; }
+.#{$fa-css-prefix}-inbox:before { content: $fa-var-inbox; }
+.#{$fa-css-prefix}-play-circle-o:before { content: $fa-var-play-circle-o; }
+.#{$fa-css-prefix}-rotate-right:before,
+.#{$fa-css-prefix}-repeat:before { content: $fa-var-repeat; }
+.#{$fa-css-prefix}-refresh:before { content: $fa-var-refresh; }
+.#{$fa-css-prefix}-list-alt:before { content: $fa-var-list-alt; }
+.#{$fa-css-prefix}-lock:before { content: $fa-var-lock; }
+.#{$fa-css-prefix}-flag:before { content: $fa-var-flag; }
+.#{$fa-css-prefix}-headphones:before { content: $fa-var-headphones; }
+.#{$fa-css-prefix}-volume-off:before { content: $fa-var-volume-off; }
+.#{$fa-css-prefix}-volume-down:before { content: $fa-var-volume-down; }
+.#{$fa-css-prefix}-volume-up:before { content: $fa-var-volume-up; }
+.#{$fa-css-prefix}-qrcode:before { content: $fa-var-qrcode; }
+.#{$fa-css-prefix}-barcode:before { content: $fa-var-barcode; }
+.#{$fa-css-prefix}-tag:before { content: $fa-var-tag; }
+.#{$fa-css-prefix}-tags:before { content: $fa-var-tags; }
+.#{$fa-css-prefix}-book:before { content: $fa-var-book; }
+.#{$fa-css-prefix}-bookmark:before { content: $fa-var-bookmark; }
+.#{$fa-css-prefix}-print:before { content: $fa-var-print; }
+.#{$fa-css-prefix}-camera:before { content: $fa-var-camera; }
+.#{$fa-css-prefix}-font:before { content: $fa-var-font; }
+.#{$fa-css-prefix}-bold:before { content: $fa-var-bold; }
+.#{$fa-css-prefix}-italic:before { content: $fa-var-italic; }
+.#{$fa-css-prefix}-text-height:before { content: $fa-var-text-height; }
+.#{$fa-css-prefix}-text-width:before { content: $fa-var-text-width; }
+.#{$fa-css-prefix}-align-left:before { content: $fa-var-align-left; }
+.#{$fa-css-prefix}-align-center:before { content: $fa-var-align-center; }
+.#{$fa-css-prefix}-align-right:before { content: $fa-var-align-right; }
+.#{$fa-css-prefix}-align-justify:before { content: $fa-var-align-justify; }
+.#{$fa-css-prefix}-list:before { content: $fa-var-list; }
+.#{$fa-css-prefix}-dedent:before,
+.#{$fa-css-prefix}-outdent:before { content: $fa-var-outdent; }
+.#{$fa-css-prefix}-indent:before { content: $fa-var-indent; }
+.#{$fa-css-prefix}-video-camera:before { content: $fa-var-video-camera; }
+.#{$fa-css-prefix}-photo:before,
+.#{$fa-css-prefix}-image:before,
+.#{$fa-css-prefix}-picture-o:before { content: $fa-var-picture-o; }
+.#{$fa-css-prefix}-pencil:before { content: $fa-var-pencil; }
+.#{$fa-css-prefix}-map-marker:before { content: $fa-var-map-marker; }
+.#{$fa-css-prefix}-adjust:before { content: $fa-var-adjust; }
+.#{$fa-css-prefix}-tint:before { content: $fa-var-tint; }
+.#{$fa-css-prefix}-edit:before,
+.#{$fa-css-prefix}-pencil-square-o:before { content: $fa-var-pencil-square-o; }
+.#{$fa-css-prefix}-share-square-o:before { content: $fa-var-share-square-o; }
+.#{$fa-css-prefix}-check-square-o:before { content: $fa-var-check-square-o; }
+.#{$fa-css-prefix}-arrows:before { content: $fa-var-arrows; }
+.#{$fa-css-prefix}-step-backward:before { content: $fa-var-step-backward; }
+.#{$fa-css-prefix}-fast-backward:before { content: $fa-var-fast-backward; }
+.#{$fa-css-prefix}-backward:before { content: $fa-var-backward; }
+.#{$fa-css-prefix}-play:before { content: $fa-var-play; }
+.#{$fa-css-prefix}-pause:before { content: $fa-var-pause; }
+.#{$fa-css-prefix}-stop:before { content: $fa-var-stop; }
+.#{$fa-css-prefix}-forward:before { content: $fa-var-forward; }
+.#{$fa-css-prefix}-fast-forward:before { content: $fa-var-fast-forward; }
+.#{$fa-css-prefix}-step-forward:before { content: $fa-var-step-forward; }
+.#{$fa-css-prefix}-eject:before { content: $fa-var-eject; }
+.#{$fa-css-prefix}-chevron-left:before { content: $fa-var-chevron-left; }
+.#{$fa-css-prefix}-chevron-right:before { content: $fa-var-chevron-right; }
+.#{$fa-css-prefix}-plus-circle:before { content: $fa-var-plus-circle; }
+.#{$fa-css-prefix}-minus-circle:before { content: $fa-var-minus-circle; }
+.#{$fa-css-prefix}-times-circle:before { content: $fa-var-times-circle; }
+.#{$fa-css-prefix}-check-circle:before { content: $fa-var-check-circle; }
+.#{$fa-css-prefix}-question-circle:before { content: $fa-var-question-circle; }
+.#{$fa-css-prefix}-info-circle:before { content: $fa-var-info-circle; }
+.#{$fa-css-prefix}-crosshairs:before { content: $fa-var-crosshairs; }
+.#{$fa-css-prefix}-times-circle-o:before { content: $fa-var-times-circle-o; }
+.#{$fa-css-prefix}-check-circle-o:before { content: $fa-var-check-circle-o; }
+.#{$fa-css-prefix}-ban:before { content: $fa-var-ban; }
+.#{$fa-css-prefix}-arrow-left:before { content: $fa-var-arrow-left; }
+.#{$fa-css-prefix}-arrow-right:before { content: $fa-var-arrow-right; }
+.#{$fa-css-prefix}-arrow-up:before { content: $fa-var-arrow-up; }
+.#{$fa-css-prefix}-arrow-down:before { content: $fa-var-arrow-down; }
+.#{$fa-css-prefix}-mail-forward:before,
+.#{$fa-css-prefix}-share:before { content: $fa-var-share; }
+.#{$fa-css-prefix}-expand:before { content: $fa-var-expand; }
+.#{$fa-css-prefix}-compress:before { content: $fa-var-compress; }
+.#{$fa-css-prefix}-plus:before { content: $fa-var-plus; }
+.#{$fa-css-prefix}-minus:before { content: $fa-var-minus; }
+.#{$fa-css-prefix}-asterisk:before { content: $fa-var-asterisk; }
+.#{$fa-css-prefix}-exclamation-circle:before { content: $fa-var-exclamation-circle; }
+.#{$fa-css-prefix}-gift:before { content: $fa-var-gift; }
+.#{$fa-css-prefix}-leaf:before { content: $fa-var-leaf; }
+.#{$fa-css-prefix}-fire:before { content: $fa-var-fire; }
+.#{$fa-css-prefix}-eye:before { content: $fa-var-eye; }
+.#{$fa-css-prefix}-eye-slash:before { content: $fa-var-eye-slash; }
+.#{$fa-css-prefix}-warning:before,
+.#{$fa-css-prefix}-exclamation-triangle:before { content: $fa-var-exclamation-triangle; }
+.#{$fa-css-prefix}-plane:before { content: $fa-var-plane; }
+.#{$fa-css-prefix}-calendar:before { content: $fa-var-calendar; }
+.#{$fa-css-prefix}-random:before { content: $fa-var-random; }
+.#{$fa-css-prefix}-comment:before { content: $fa-var-comment; }
+.#{$fa-css-prefix}-magnet:before { content: $fa-var-magnet; }
+.#{$fa-css-prefix}-chevron-up:before { content: $fa-var-chevron-up; }
+.#{$fa-css-prefix}-chevron-down:before { content: $fa-var-chevron-down; }
+.#{$fa-css-prefix}-retweet:before { content: $fa-var-retweet; }
+.#{$fa-css-prefix}-shopping-cart:before { content: $fa-var-shopping-cart; }
+.#{$fa-css-prefix}-folder:before { content: $fa-var-folder; }
+.#{$fa-css-prefix}-folder-open:before { content: $fa-var-folder-open; }
+.#{$fa-css-prefix}-arrows-v:before { content: $fa-var-arrows-v; }
+.#{$fa-css-prefix}-arrows-h:before { content: $fa-var-arrows-h; }
+.#{$fa-css-prefix}-bar-chart-o:before,
+.#{$fa-css-prefix}-bar-chart:before { content: $fa-var-bar-chart; }
+.#{$fa-css-prefix}-twitter-square:before { content: $fa-var-twitter-square; }
+.#{$fa-css-prefix}-facebook-square:before { content: $fa-var-facebook-square; }
+.#{$fa-css-prefix}-camera-retro:before { content: $fa-var-camera-retro; }
+.#{$fa-css-prefix}-key:before { content: $fa-var-key; }
+.#{$fa-css-prefix}-gears:before,
+.#{$fa-css-prefix}-cogs:before { content: $fa-var-cogs; }
+.#{$fa-css-prefix}-comments:before { content: $fa-var-comments; }
+.#{$fa-css-prefix}-thumbs-o-up:before { content: $fa-var-thumbs-o-up; }
+.#{$fa-css-prefix}-thumbs-o-down:before { content: $fa-var-thumbs-o-down; }
+.#{$fa-css-prefix}-star-half:before { content: $fa-var-star-half; }
+.#{$fa-css-prefix}-heart-o:before { content: $fa-var-heart-o; }
+.#{$fa-css-prefix}-sign-out:before { content: $fa-var-sign-out; }
+.#{$fa-css-prefix}-linkedin-square:before { content: $fa-var-linkedin-square; }
+.#{$fa-css-prefix}-thumb-tack:before { content: $fa-var-thumb-tack; }
+.#{$fa-css-prefix}-external-link:before { content: $fa-var-external-link; }
+.#{$fa-css-prefix}-sign-in:before { content: $fa-var-sign-in; }
+.#{$fa-css-prefix}-trophy:before { content: $fa-var-trophy; }
+.#{$fa-css-prefix}-github-square:before { content: $fa-var-github-square; }
+.#{$fa-css-prefix}-upload:before { content: $fa-var-upload; }
+.#{$fa-css-prefix}-lemon-o:before { content: $fa-var-lemon-o; }
+.#{$fa-css-prefix}-phone:before { content: $fa-var-phone; }
+.#{$fa-css-prefix}-square-o:before { content: $fa-var-square-o; }
+.#{$fa-css-prefix}-bookmark-o:before { content: $fa-var-bookmark-o; }
+.#{$fa-css-prefix}-phone-square:before { content: $fa-var-phone-square; }
+.#{$fa-css-prefix}-twitter:before { content: $fa-var-twitter; }
+.#{$fa-css-prefix}-facebook-f:before,
+.#{$fa-css-prefix}-facebook:before { content: $fa-var-facebook; }
+.#{$fa-css-prefix}-github:before { content: $fa-var-github; }
+.#{$fa-css-prefix}-unlock:before { content: $fa-var-unlock; }
+.#{$fa-css-prefix}-credit-card:before { content: $fa-var-credit-card; }
+.#{$fa-css-prefix}-feed:before,
+.#{$fa-css-prefix}-rss:before { content: $fa-var-rss; }
+.#{$fa-css-prefix}-hdd-o:before { content: $fa-var-hdd-o; }
+.#{$fa-css-prefix}-bullhorn:before { content: $fa-var-bullhorn; }
+.#{$fa-css-prefix}-bell:before { content: $fa-var-bell; }
+.#{$fa-css-prefix}-certificate:before { content: $fa-var-certificate; }
+.#{$fa-css-prefix}-hand-o-right:before { content: $fa-var-hand-o-right; }
+.#{$fa-css-prefix}-hand-o-left:before { content: $fa-var-hand-o-left; }
+.#{$fa-css-prefix}-hand-o-up:before { content: $fa-var-hand-o-up; }
+.#{$fa-css-prefix}-hand-o-down:before { content: $fa-var-hand-o-down; }
+.#{$fa-css-prefix}-arrow-circle-left:before { content: $fa-var-arrow-circle-left; }
+.#{$fa-css-prefix}-arrow-circle-right:before { content: $fa-var-arrow-circle-right; }
+.#{$fa-css-prefix}-arrow-circle-up:before { content: $fa-var-arrow-circle-up; }
+.#{$fa-css-prefix}-arrow-circle-down:before { content: $fa-var-arrow-circle-down; }
+.#{$fa-css-prefix}-globe:before { content: $fa-var-globe; }
+.#{$fa-css-prefix}-wrench:before { content: $fa-var-wrench; }
+.#{$fa-css-prefix}-tasks:before { content: $fa-var-tasks; }
+.#{$fa-css-prefix}-filter:before { content: $fa-var-filter; }
+.#{$fa-css-prefix}-briefcase:before { content: $fa-var-briefcase; }
+.#{$fa-css-prefix}-arrows-alt:before { content: $fa-var-arrows-alt; }
+.#{$fa-css-prefix}-group:before,
+.#{$fa-css-prefix}-users:before { content: $fa-var-users; }
+.#{$fa-css-prefix}-chain:before,
+.#{$fa-css-prefix}-link:before { content: $fa-var-link; }
+.#{$fa-css-prefix}-cloud:before { content: $fa-var-cloud; }
+.#{$fa-css-prefix}-flask:before { content: $fa-var-flask; }
+.#{$fa-css-prefix}-cut:before,
+.#{$fa-css-prefix}-scissors:before { content: $fa-var-scissors; }
+.#{$fa-css-prefix}-copy:before,
+.#{$fa-css-prefix}-files-o:before { content: $fa-var-files-o; }
+.#{$fa-css-prefix}-paperclip:before { content: $fa-var-paperclip; }
+.#{$fa-css-prefix}-save:before,
+.#{$fa-css-prefix}-floppy-o:before { content: $fa-var-floppy-o; }
+.#{$fa-css-prefix}-square:before { content: $fa-var-square; }
+.#{$fa-css-prefix}-navicon:before,
+.#{$fa-css-prefix}-reorder:before,
+.#{$fa-css-prefix}-bars:before { content: $fa-var-bars; }
+.#{$fa-css-prefix}-list-ul:before { content: $fa-var-list-ul; }
+.#{$fa-css-prefix}-list-ol:before { content: $fa-var-list-ol; }
+.#{$fa-css-prefix}-strikethrough:before { content: $fa-var-strikethrough; }
+.#{$fa-css-prefix}-underline:before { content: $fa-var-underline; }
+.#{$fa-css-prefix}-table:before { content: $fa-var-table; }
+.#{$fa-css-prefix}-magic:before { content: $fa-var-magic; }
+.#{$fa-css-prefix}-truck:before { content: $fa-var-truck; }
+.#{$fa-css-prefix}-pinterest:before { content: $fa-var-pinterest; }
+.#{$fa-css-prefix}-pinterest-square:before { content: $fa-var-pinterest-square; }
+.#{$fa-css-prefix}-google-plus-square:before { content: $fa-var-google-plus-square; }
+.#{$fa-css-prefix}-google-plus:before { content: $fa-var-google-plus; }
+.#{$fa-css-prefix}-money:before { content: $fa-var-money; }
+.#{$fa-css-prefix}-caret-down:before { content: $fa-var-caret-down; }
+.#{$fa-css-prefix}-caret-up:before { content: $fa-var-caret-up; }
+.#{$fa-css-prefix}-caret-left:before { content: $fa-var-caret-left; }
+.#{$fa-css-prefix}-caret-right:before { content: $fa-var-caret-right; }
+.#{$fa-css-prefix}-columns:before { content: $fa-var-columns; }
+.#{$fa-css-prefix}-unsorted:before,
+.#{$fa-css-prefix}-sort:before { content: $fa-var-sort; }
+.#{$fa-css-prefix}-sort-down:before,
+.#{$fa-css-prefix}-sort-desc:before { content: $fa-var-sort-desc; }
+.#{$fa-css-prefix}-sort-up:before,
+.#{$fa-css-prefix}-sort-asc:before { content: $fa-var-sort-asc; }
+.#{$fa-css-prefix}-envelope:before { content: $fa-var-envelope; }
+.#{$fa-css-prefix}-linkedin:before { content: $fa-var-linkedin; }
+.#{$fa-css-prefix}-rotate-left:before,
+.#{$fa-css-prefix}-undo:before { content: $fa-var-undo; }
+.#{$fa-css-prefix}-legal:before,
+.#{$fa-css-prefix}-gavel:before { content: $fa-var-gavel; }
+.#{$fa-css-prefix}-dashboard:before,
+.#{$fa-css-prefix}-tachometer:before { content: $fa-var-tachometer; }
+.#{$fa-css-prefix}-comment-o:before { content: $fa-var-comment-o; }
+.#{$fa-css-prefix}-comments-o:before { content: $fa-var-comments-o; }
+.#{$fa-css-prefix}-flash:before,
+.#{$fa-css-prefix}-bolt:before { content: $fa-var-bolt; }
+.#{$fa-css-prefix}-sitemap:before { content: $fa-var-sitemap; }
+.#{$fa-css-prefix}-umbrella:before { content: $fa-var-umbrella; }
+.#{$fa-css-prefix}-paste:before,
+.#{$fa-css-prefix}-clipboard:before { content: $fa-var-clipboard; }
+.#{$fa-css-prefix}-lightbulb-o:before { content: $fa-var-lightbulb-o; }
+.#{$fa-css-prefix}-exchange:before { content: $fa-var-exchange; }
+.#{$fa-css-prefix}-cloud-download:before { content: $fa-var-cloud-download; }
+.#{$fa-css-prefix}-cloud-upload:before { content: $fa-var-cloud-upload; }
+.#{$fa-css-prefix}-user-md:before { content: $fa-var-user-md; }
+.#{$fa-css-prefix}-stethoscope:before { content: $fa-var-stethoscope; }
+.#{$fa-css-prefix}-suitcase:before { content: $fa-var-suitcase; }
+.#{$fa-css-prefix}-bell-o:before { content: $fa-var-bell-o; }
+.#{$fa-css-prefix}-coffee:before { content: $fa-var-coffee; }
+.#{$fa-css-prefix}-cutlery:before { content: $fa-var-cutlery; }
+.#{$fa-css-prefix}-file-text-o:before { content: $fa-var-file-text-o; }
+.#{$fa-css-prefix}-building-o:before { content: $fa-var-building-o; }
+.#{$fa-css-prefix}-hospital-o:before { content: $fa-var-hospital-o; }
+.#{$fa-css-prefix}-ambulance:before { content: $fa-var-ambulance; }
+.#{$fa-css-prefix}-medkit:before { content: $fa-var-medkit; }
+.#{$fa-css-prefix}-fighter-jet:before { content: $fa-var-fighter-jet; }
+.#{$fa-css-prefix}-beer:before { content: $fa-var-beer; }
+.#{$fa-css-prefix}-h-square:before { content: $fa-var-h-square; }
+.#{$fa-css-prefix}-plus-square:before { content: $fa-var-plus-square; }
+.#{$fa-css-prefix}-angle-double-left:before { content: $fa-var-angle-double-left; }
+.#{$fa-css-prefix}-angle-double-right:before { content: $fa-var-angle-double-right; }
+.#{$fa-css-prefix}-angle-double-up:before { content: $fa-var-angle-double-up; }
+.#{$fa-css-prefix}-angle-double-down:before { content: $fa-var-angle-double-down; }
+.#{$fa-css-prefix}-angle-left:before { content: $fa-var-angle-left; }
+.#{$fa-css-prefix}-angle-right:before { content: $fa-var-angle-right; }
+.#{$fa-css-prefix}-angle-up:before { content: $fa-var-angle-up; }
+.#{$fa-css-prefix}-angle-down:before { content: $fa-var-angle-down; }
+.#{$fa-css-prefix}-desktop:before { content: $fa-var-desktop; }
+.#{$fa-css-prefix}-laptop:before { content: $fa-var-laptop; }
+.#{$fa-css-prefix}-tablet:before { content: $fa-var-tablet; }
+.#{$fa-css-prefix}-mobile-phone:before,
+.#{$fa-css-prefix}-mobile:before { content: $fa-var-mobile; }
+.#{$fa-css-prefix}-circle-o:before { content: $fa-var-circle-o; }
+.#{$fa-css-prefix}-quote-left:before { content: $fa-var-quote-left; }
+.#{$fa-css-prefix}-quote-right:before { content: $fa-var-quote-right; }
+.#{$fa-css-prefix}-spinner:before { content: $fa-var-spinner; }
+.#{$fa-css-prefix}-circle:before { content: $fa-var-circle; }
+.#{$fa-css-prefix}-mail-reply:before,
+.#{$fa-css-prefix}-reply:before { content: $fa-var-reply; }
+.#{$fa-css-prefix}-github-alt:before { content: $fa-var-github-alt; }
+.#{$fa-css-prefix}-folder-o:before { content: $fa-var-folder-o; }
+.#{$fa-css-prefix}-folder-open-o:before { content: $fa-var-folder-open-o; }
+.#{$fa-css-prefix}-smile-o:before { content: $fa-var-smile-o; }
+.#{$fa-css-prefix}-frown-o:before { content: $fa-var-frown-o; }
+.#{$fa-css-prefix}-meh-o:before { content: $fa-var-meh-o; }
+.#{$fa-css-prefix}-gamepad:before { content: $fa-var-gamepad; }
+.#{$fa-css-prefix}-keyboard-o:before { content: $fa-var-keyboard-o; }
+.#{$fa-css-prefix}-flag-o:before { content: $fa-var-flag-o; }
+.#{$fa-css-prefix}-flag-checkered:before { content: $fa-var-flag-checkered; }
+.#{$fa-css-prefix}-terminal:before { content: $fa-var-terminal; }
+.#{$fa-css-prefix}-code:before { content: $fa-var-code; }
+.#{$fa-css-prefix}-mail-reply-all:before,
+.#{$fa-css-prefix}-reply-all:before { content: $fa-var-reply-all; }
+.#{$fa-css-prefix}-star-half-empty:before,
+.#{$fa-css-prefix}-star-half-full:before,
+.#{$fa-css-prefix}-star-half-o:before { content: $fa-var-star-half-o; }
+.#{$fa-css-prefix}-location-arrow:before { content: $fa-var-location-arrow; }
+.#{$fa-css-prefix}-crop:before { content: $fa-var-crop; }
+.#{$fa-css-prefix}-code-fork:before { content: $fa-var-code-fork; }
+.#{$fa-css-prefix}-unlink:before,
+.#{$fa-css-prefix}-chain-broken:before { content: $fa-var-chain-broken; }
+.#{$fa-css-prefix}-question:before { content: $fa-var-question; }
+.#{$fa-css-prefix}-info:before { content: $fa-var-info; }
+.#{$fa-css-prefix}-exclamation:before { content: $fa-var-exclamation; }
+.#{$fa-css-prefix}-superscript:before { content: $fa-var-superscript; }
+.#{$fa-css-prefix}-subscript:before { content: $fa-var-subscript; }
+.#{$fa-css-prefix}-eraser:before { content: $fa-var-eraser; }
+.#{$fa-css-prefix}-puzzle-piece:before { content: $fa-var-puzzle-piece; }
+.#{$fa-css-prefix}-microphone:before { content: $fa-var-microphone; }
+.#{$fa-css-prefix}-microphone-slash:before { content: $fa-var-microphone-slash; }
+.#{$fa-css-prefix}-shield:before { content: $fa-var-shield; }
+.#{$fa-css-prefix}-calendar-o:before { content: $fa-var-calendar-o; }
+.#{$fa-css-prefix}-fire-extinguisher:before { content: $fa-var-fire-extinguisher; }
+.#{$fa-css-prefix}-rocket:before { content: $fa-var-rocket; }
+.#{$fa-css-prefix}-maxcdn:before { content: $fa-var-maxcdn; }
+.#{$fa-css-prefix}-chevron-circle-left:before { content: $fa-var-chevron-circle-left; }
+.#{$fa-css-prefix}-chevron-circle-right:before { content: $fa-var-chevron-circle-right; }
+.#{$fa-css-prefix}-chevron-circle-up:before { content: $fa-var-chevron-circle-up; }
+.#{$fa-css-prefix}-chevron-circle-down:before { content: $fa-var-chevron-circle-down; }
+.#{$fa-css-prefix}-html5:before { content: $fa-var-html5; }
+.#{$fa-css-prefix}-css3:before { content: $fa-var-css3; }
+.#{$fa-css-prefix}-anchor:before { content: $fa-var-anchor; }
+.#{$fa-css-prefix}-unlock-alt:before { content: $fa-var-unlock-alt; }
+.#{$fa-css-prefix}-bullseye:before { content: $fa-var-bullseye; }
+.#{$fa-css-prefix}-ellipsis-h:before { content: $fa-var-ellipsis-h; }
+.#{$fa-css-prefix}-ellipsis-v:before { content: $fa-var-ellipsis-v; }
+.#{$fa-css-prefix}-rss-square:before { content: $fa-var-rss-square; }
+.#{$fa-css-prefix}-play-circle:before { content: $fa-var-play-circle; }
+.#{$fa-css-prefix}-ticket:before { content: $fa-var-ticket; }
+.#{$fa-css-prefix}-minus-square:before { content: $fa-var-minus-square; }
+.#{$fa-css-prefix}-minus-square-o:before { content: $fa-var-minus-square-o; }
+.#{$fa-css-prefix}-level-up:before { content: $fa-var-level-up; }
+.#{$fa-css-prefix}-level-down:before { content: $fa-var-level-down; }
+.#{$fa-css-prefix}-check-square:before { content: $fa-var-check-square; }
+.#{$fa-css-prefix}-pencil-square:before { content: $fa-var-pencil-square; }
+.#{$fa-css-prefix}-external-link-square:before { content: $fa-var-external-link-square; }
+.#{$fa-css-prefix}-share-square:before { content: $fa-var-share-square; }
+.#{$fa-css-prefix}-compass:before { content: $fa-var-compass; }
+.#{$fa-css-prefix}-toggle-down:before,
+.#{$fa-css-prefix}-caret-square-o-down:before { content: $fa-var-caret-square-o-down; }
+.#{$fa-css-prefix}-toggle-up:before,
+.#{$fa-css-prefix}-caret-square-o-up:before { content: $fa-var-caret-square-o-up; }
+.#{$fa-css-prefix}-toggle-right:before,
+.#{$fa-css-prefix}-caret-square-o-right:before { content: $fa-var-caret-square-o-right; }
+.#{$fa-css-prefix}-euro:before,
+.#{$fa-css-prefix}-eur:before { content: $fa-var-eur; }
+.#{$fa-css-prefix}-gbp:before { content: $fa-var-gbp; }
+.#{$fa-css-prefix}-dollar:before,
+.#{$fa-css-prefix}-usd:before { content: $fa-var-usd; }
+.#{$fa-css-prefix}-rupee:before,
+.#{$fa-css-prefix}-inr:before { content: $fa-var-inr; }
+.#{$fa-css-prefix}-cny:before,
+.#{$fa-css-prefix}-rmb:before,
+.#{$fa-css-prefix}-yen:before,
+.#{$fa-css-prefix}-jpy:before { content: $fa-var-jpy; }
+.#{$fa-css-prefix}-ruble:before,
+.#{$fa-css-prefix}-rouble:before,
+.#{$fa-css-prefix}-rub:before { content: $fa-var-rub; }
+.#{$fa-css-prefix}-won:before,
+.#{$fa-css-prefix}-krw:before { content: $fa-var-krw; }
+.#{$fa-css-prefix}-bitcoin:before,
+.#{$fa-css-prefix}-btc:before { content: $fa-var-btc; }
+.#{$fa-css-prefix}-file:before { content: $fa-var-file; }
+.#{$fa-css-prefix}-file-text:before { content: $fa-var-file-text; }
+.#{$fa-css-prefix}-sort-alpha-asc:before { content: $fa-var-sort-alpha-asc; }
+.#{$fa-css-prefix}-sort-alpha-desc:before { content: $fa-var-sort-alpha-desc; }
+.#{$fa-css-prefix}-sort-amount-asc:before { content: $fa-var-sort-amount-asc; }
+.#{$fa-css-prefix}-sort-amount-desc:before { content: $fa-var-sort-amount-desc; }
+.#{$fa-css-prefix}-sort-numeric-asc:before { content: $fa-var-sort-numeric-asc; }
+.#{$fa-css-prefix}-sort-numeric-desc:before { content: $fa-var-sort-numeric-desc; }
+.#{$fa-css-prefix}-thumbs-up:before { content: $fa-var-thumbs-up; }
+.#{$fa-css-prefix}-thumbs-down:before { content: $fa-var-thumbs-down; }
+.#{$fa-css-prefix}-youtube-square:before { content: $fa-var-youtube-square; }
+.#{$fa-css-prefix}-youtube:before { content: $fa-var-youtube; }
+.#{$fa-css-prefix}-xing:before { content: $fa-var-xing; }
+.#{$fa-css-prefix}-xing-square:before { content: $fa-var-xing-square; }
+.#{$fa-css-prefix}-youtube-play:before { content: $fa-var-youtube-play; }
+.#{$fa-css-prefix}-dropbox:before { content: $fa-var-dropbox; }
+.#{$fa-css-prefix}-stack-overflow:before { content: $fa-var-stack-overflow; }
+.#{$fa-css-prefix}-instagram:before { content: $fa-var-instagram; }
+.#{$fa-css-prefix}-flickr:before { content: $fa-var-flickr; }
+.#{$fa-css-prefix}-adn:before { content: $fa-var-adn; }
+.#{$fa-css-prefix}-bitbucket:before { content: $fa-var-bitbucket; }
+.#{$fa-css-prefix}-bitbucket-square:before { content: $fa-var-bitbucket-square; }
+.#{$fa-css-prefix}-tumblr:before { content: $fa-var-tumblr; }
+.#{$fa-css-prefix}-tumblr-square:before { content: $fa-var-tumblr-square; }
+.#{$fa-css-prefix}-long-arrow-down:before { content: $fa-var-long-arrow-down; }
+.#{$fa-css-prefix}-long-arrow-up:before { content: $fa-var-long-arrow-up; }
+.#{$fa-css-prefix}-long-arrow-left:before { content: $fa-var-long-arrow-left; }
+.#{$fa-css-prefix}-long-arrow-right:before { content: $fa-var-long-arrow-right; }
+.#{$fa-css-prefix}-apple:before { content: $fa-var-apple; }
+.#{$fa-css-prefix}-windows:before { content: $fa-var-windows; }
+.#{$fa-css-prefix}-android:before { content: $fa-var-android; }
+.#{$fa-css-prefix}-linux:before { content: $fa-var-linux; }
+.#{$fa-css-prefix}-dribbble:before { content: $fa-var-dribbble; }
+.#{$fa-css-prefix}-skype:before { content: $fa-var-skype; }
+.#{$fa-css-prefix}-foursquare:before { content: $fa-var-foursquare; }
+.#{$fa-css-prefix}-trello:before { content: $fa-var-trello; }
+.#{$fa-css-prefix}-female:before { content: $fa-var-female; }
+.#{$fa-css-prefix}-male:before { content: $fa-var-male; }
+.#{$fa-css-prefix}-gittip:before,
+.#{$fa-css-prefix}-gratipay:before { content: $fa-var-gratipay; }
+.#{$fa-css-prefix}-sun-o:before { content: $fa-var-sun-o; }
+.#{$fa-css-prefix}-moon-o:before { content: $fa-var-moon-o; }
+.#{$fa-css-prefix}-archive:before { content: $fa-var-archive; }
+.#{$fa-css-prefix}-bug:before { content: $fa-var-bug; }
+.#{$fa-css-prefix}-vk:before { content: $fa-var-vk; }
+.#{$fa-css-prefix}-weibo:before { content: $fa-var-weibo; }
+.#{$fa-css-prefix}-renren:before { content: $fa-var-renren; }
+.#{$fa-css-prefix}-pagelines:before { content: $fa-var-pagelines; }
+.#{$fa-css-prefix}-stack-exchange:before { content: $fa-var-stack-exchange; }
+.#{$fa-css-prefix}-arrow-circle-o-right:before { content: $fa-var-arrow-circle-o-right; }
+.#{$fa-css-prefix}-arrow-circle-o-left:before { content: $fa-var-arrow-circle-o-left; }
+.#{$fa-css-prefix}-toggle-left:before,
+.#{$fa-css-prefix}-caret-square-o-left:before { content: $fa-var-caret-square-o-left; }
+.#{$fa-css-prefix}-dot-circle-o:before { content: $fa-var-dot-circle-o; }
+.#{$fa-css-prefix}-wheelchair:before { content: $fa-var-wheelchair; }
+.#{$fa-css-prefix}-vimeo-square:before { content: $fa-var-vimeo-square; }
+.#{$fa-css-prefix}-turkish-lira:before,
+.#{$fa-css-prefix}-try:before { content: $fa-var-try; }
+.#{$fa-css-prefix}-plus-square-o:before { content: $fa-var-plus-square-o; }
+.#{$fa-css-prefix}-space-shuttle:before { content: $fa-var-space-shuttle; }
+.#{$fa-css-prefix}-slack:before { content: $fa-var-slack; }
+.#{$fa-css-prefix}-envelope-square:before { content: $fa-var-envelope-square; }
+.#{$fa-css-prefix}-wordpress:before { content: $fa-var-wordpress; }
+.#{$fa-css-prefix}-openid:before { content: $fa-var-openid; }
+.#{$fa-css-prefix}-institution:before,
+.#{$fa-css-prefix}-bank:before,
+.#{$fa-css-prefix}-university:before { content: $fa-var-university; }
+.#{$fa-css-prefix}-mortar-board:before,
+.#{$fa-css-prefix}-graduation-cap:before { content: $fa-var-graduation-cap; }
+.#{$fa-css-prefix}-yahoo:before { content: $fa-var-yahoo; }
+.#{$fa-css-prefix}-google:before { content: $fa-var-google; }
+.#{$fa-css-prefix}-reddit:before { content: $fa-var-reddit; }
+.#{$fa-css-prefix}-reddit-square:before { content: $fa-var-reddit-square; }
+.#{$fa-css-prefix}-stumbleupon-circle:before { content: $fa-var-stumbleupon-circle; }
+.#{$fa-css-prefix}-stumbleupon:before { content: $fa-var-stumbleupon; }
+.#{$fa-css-prefix}-delicious:before { content: $fa-var-delicious; }
+.#{$fa-css-prefix}-digg:before { content: $fa-var-digg; }
+.#{$fa-css-prefix}-pied-piper:before { content: $fa-var-pied-piper; }
+.#{$fa-css-prefix}-pied-piper-alt:before { content: $fa-var-pied-piper-alt; }
+.#{$fa-css-prefix}-drupal:before { content: $fa-var-drupal; }
+.#{$fa-css-prefix}-joomla:before { content: $fa-var-joomla; }
+.#{$fa-css-prefix}-language:before { content: $fa-var-language; }
+.#{$fa-css-prefix}-fax:before { content: $fa-var-fax; }
+.#{$fa-css-prefix}-building:before { content: $fa-var-building; }
+.#{$fa-css-prefix}-child:before { content: $fa-var-child; }
+.#{$fa-css-prefix}-paw:before { content: $fa-var-paw; }
+.#{$fa-css-prefix}-spoon:before { content: $fa-var-spoon; }
+.#{$fa-css-prefix}-cube:before { content: $fa-var-cube; }
+.#{$fa-css-prefix}-cubes:before { content: $fa-var-cubes; }
+.#{$fa-css-prefix}-behance:before { content: $fa-var-behance; }
+.#{$fa-css-prefix}-behance-square:before { content: $fa-var-behance-square; }
+.#{$fa-css-prefix}-steam:before { content: $fa-var-steam; }
+.#{$fa-css-prefix}-steam-square:before { content: $fa-var-steam-square; }
+.#{$fa-css-prefix}-recycle:before { content: $fa-var-recycle; }
+.#{$fa-css-prefix}-automobile:before,
+.#{$fa-css-prefix}-car:before { content: $fa-var-car; }
+.#{$fa-css-prefix}-cab:before,
+.#{$fa-css-prefix}-taxi:before { content: $fa-var-taxi; }
+.#{$fa-css-prefix}-tree:before { content: $fa-var-tree; }
+.#{$fa-css-prefix}-spotify:before { content: $fa-var-spotify; }
+.#{$fa-css-prefix}-deviantart:before { content: $fa-var-deviantart; }
+.#{$fa-css-prefix}-soundcloud:before { content: $fa-var-soundcloud; }
+.#{$fa-css-prefix}-database:before { content: $fa-var-database; }
+.#{$fa-css-prefix}-file-pdf-o:before { content: $fa-var-file-pdf-o; }
+.#{$fa-css-prefix}-file-word-o:before { content: $fa-var-file-word-o; }
+.#{$fa-css-prefix}-file-excel-o:before { content: $fa-var-file-excel-o; }
+.#{$fa-css-prefix}-file-powerpoint-o:before { content: $fa-var-file-powerpoint-o; }
+.#{$fa-css-prefix}-file-photo-o:before,
+.#{$fa-css-prefix}-file-picture-o:before,
+.#{$fa-css-prefix}-file-image-o:before { content: $fa-var-file-image-o; }
+.#{$fa-css-prefix}-file-zip-o:before,
+.#{$fa-css-prefix}-file-archive-o:before { content: $fa-var-file-archive-o; }
+.#{$fa-css-prefix}-file-sound-o:before,
+.#{$fa-css-prefix}-file-audio-o:before { content: $fa-var-file-audio-o; }
+.#{$fa-css-prefix}-file-movie-o:before,
+.#{$fa-css-prefix}-file-video-o:before { content: $fa-var-file-video-o; }
+.#{$fa-css-prefix}-file-code-o:before { content: $fa-var-file-code-o; }
+.#{$fa-css-prefix}-vine:before { content: $fa-var-vine; }
+.#{$fa-css-prefix}-codepen:before { content: $fa-var-codepen; }
+.#{$fa-css-prefix}-jsfiddle:before { content: $fa-var-jsfiddle; }
+.#{$fa-css-prefix}-life-bouy:before,
+.#{$fa-css-prefix}-life-buoy:before,
+.#{$fa-css-prefix}-life-saver:before,
+.#{$fa-css-prefix}-support:before,
+.#{$fa-css-prefix}-life-ring:before { content: $fa-var-life-ring; }
+.#{$fa-css-prefix}-circle-o-notch:before { content: $fa-var-circle-o-notch; }
+.#{$fa-css-prefix}-ra:before,
+.#{$fa-css-prefix}-rebel:before { content: $fa-var-rebel; }
+.#{$fa-css-prefix}-ge:before,
+.#{$fa-css-prefix}-empire:before { content: $fa-var-empire; }
+.#{$fa-css-prefix}-git-square:before { content: $fa-var-git-square; }
+.#{$fa-css-prefix}-git:before { content: $fa-var-git; }
+.#{$fa-css-prefix}-y-combinator-square:before,
+.#{$fa-css-prefix}-yc-square:before,
+.#{$fa-css-prefix}-hacker-news:before { content: $fa-var-hacker-news; }
+.#{$fa-css-prefix}-tencent-weibo:before { content: $fa-var-tencent-weibo; }
+.#{$fa-css-prefix}-qq:before { content: $fa-var-qq; }
+.#{$fa-css-prefix}-wechat:before,
+.#{$fa-css-prefix}-weixin:before { content: $fa-var-weixin; }
+.#{$fa-css-prefix}-send:before,
+.#{$fa-css-prefix}-paper-plane:before { content: $fa-var-paper-plane; }
+.#{$fa-css-prefix}-send-o:before,
+.#{$fa-css-prefix}-paper-plane-o:before { content: $fa-var-paper-plane-o; }
+.#{$fa-css-prefix}-history:before { content: $fa-var-history; }
+.#{$fa-css-prefix}-circle-thin:before { content: $fa-var-circle-thin; }
+.#{$fa-css-prefix}-header:before { content: $fa-var-header; }
+.#{$fa-css-prefix}-paragraph:before { content: $fa-var-paragraph; }
+.#{$fa-css-prefix}-sliders:before { content: $fa-var-sliders; }
+.#{$fa-css-prefix}-share-alt:before { content: $fa-var-share-alt; }
+.#{$fa-css-prefix}-share-alt-square:before { content: $fa-var-share-alt-square; }
+.#{$fa-css-prefix}-bomb:before { content: $fa-var-bomb; }
+.#{$fa-css-prefix}-soccer-ball-o:before,
+.#{$fa-css-prefix}-futbol-o:before { content: $fa-var-futbol-o; }
+.#{$fa-css-prefix}-tty:before { content: $fa-var-tty; }
+.#{$fa-css-prefix}-binoculars:before { content: $fa-var-binoculars; }
+.#{$fa-css-prefix}-plug:before { content: $fa-var-plug; }
+.#{$fa-css-prefix}-slideshare:before { content: $fa-var-slideshare; }
+.#{$fa-css-prefix}-twitch:before { content: $fa-var-twitch; }
+.#{$fa-css-prefix}-yelp:before { content: $fa-var-yelp; }
+.#{$fa-css-prefix}-newspaper-o:before { content: $fa-var-newspaper-o; }
+.#{$fa-css-prefix}-wifi:before { content: $fa-var-wifi; }
+.#{$fa-css-prefix}-calculator:before { content: $fa-var-calculator; }
+.#{$fa-css-prefix}-paypal:before { content: $fa-var-paypal; }
+.#{$fa-css-prefix}-google-wallet:before { content: $fa-var-google-wallet; }
+.#{$fa-css-prefix}-cc-visa:before { content: $fa-var-cc-visa; }
+.#{$fa-css-prefix}-cc-mastercard:before { content: $fa-var-cc-mastercard; }
+.#{$fa-css-prefix}-cc-discover:before { content: $fa-var-cc-discover; }
+.#{$fa-css-prefix}-cc-amex:before { content: $fa-var-cc-amex; }
+.#{$fa-css-prefix}-cc-paypal:before { content: $fa-var-cc-paypal; }
+.#{$fa-css-prefix}-cc-stripe:before { content: $fa-var-cc-stripe; }
+.#{$fa-css-prefix}-bell-slash:before { content: $fa-var-bell-slash; }
+.#{$fa-css-prefix}-bell-slash-o:before { content: $fa-var-bell-slash-o; }
+.#{$fa-css-prefix}-trash:before { content: $fa-var-trash; }
+.#{$fa-css-prefix}-copyright:before { content: $fa-var-copyright; }
+.#{$fa-css-prefix}-at:before { content: $fa-var-at; }
+.#{$fa-css-prefix}-eyedropper:before { content: $fa-var-eyedropper; }
+.#{$fa-css-prefix}-paint-brush:before { content: $fa-var-paint-brush; }
+.#{$fa-css-prefix}-birthday-cake:before { content: $fa-var-birthday-cake; }
+.#{$fa-css-prefix}-area-chart:before { content: $fa-var-area-chart; }
+.#{$fa-css-prefix}-pie-chart:before { content: $fa-var-pie-chart; }
+.#{$fa-css-prefix}-line-chart:before { content: $fa-var-line-chart; }
+.#{$fa-css-prefix}-lastfm:before { content: $fa-var-lastfm; }
+.#{$fa-css-prefix}-lastfm-square:before { content: $fa-var-lastfm-square; }
+.#{$fa-css-prefix}-toggle-off:before { content: $fa-var-toggle-off; }
+.#{$fa-css-prefix}-toggle-on:before { content: $fa-var-toggle-on; }
+.#{$fa-css-prefix}-bicycle:before { content: $fa-var-bicycle; }
+.#{$fa-css-prefix}-bus:before { content: $fa-var-bus; }
+.#{$fa-css-prefix}-ioxhost:before { content: $fa-var-ioxhost; }
+.#{$fa-css-prefix}-angellist:before { content: $fa-var-angellist; }
+.#{$fa-css-prefix}-cc:before { content: $fa-var-cc; }
+.#{$fa-css-prefix}-shekel:before,
+.#{$fa-css-prefix}-sheqel:before,
+.#{$fa-css-prefix}-ils:before { content: $fa-var-ils; }
+.#{$fa-css-prefix}-meanpath:before { content: $fa-var-meanpath; }
+.#{$fa-css-prefix}-buysellads:before { content: $fa-var-buysellads; }
+.#{$fa-css-prefix}-connectdevelop:before { content: $fa-var-connectdevelop; }
+.#{$fa-css-prefix}-dashcube:before { content: $fa-var-dashcube; }
+.#{$fa-css-prefix}-forumbee:before { content: $fa-var-forumbee; }
+.#{$fa-css-prefix}-leanpub:before { content: $fa-var-leanpub; }
+.#{$fa-css-prefix}-sellsy:before { content: $fa-var-sellsy; }
+.#{$fa-css-prefix}-shirtsinbulk:before { content: $fa-var-shirtsinbulk; }
+.#{$fa-css-prefix}-simplybuilt:before { content: $fa-var-simplybuilt; }
+.#{$fa-css-prefix}-skyatlas:before { content: $fa-var-skyatlas; }
+.#{$fa-css-prefix}-cart-plus:before { content: $fa-var-cart-plus; }
+.#{$fa-css-prefix}-cart-arrow-down:before { content: $fa-var-cart-arrow-down; }
+.#{$fa-css-prefix}-diamond:before { content: $fa-var-diamond; }
+.#{$fa-css-prefix}-ship:before { content: $fa-var-ship; }
+.#{$fa-css-prefix}-user-secret:before { content: $fa-var-user-secret; }
+.#{$fa-css-prefix}-motorcycle:before { content: $fa-var-motorcycle; }
+.#{$fa-css-prefix}-street-view:before { content: $fa-var-street-view; }
+.#{$fa-css-prefix}-heartbeat:before { content: $fa-var-heartbeat; }
+.#{$fa-css-prefix}-venus:before { content: $fa-var-venus; }
+.#{$fa-css-prefix}-mars:before { content: $fa-var-mars; }
+.#{$fa-css-prefix}-mercury:before { content: $fa-var-mercury; }
+.#{$fa-css-prefix}-intersex:before,
+.#{$fa-css-prefix}-transgender:before { content: $fa-var-transgender; }
+.#{$fa-css-prefix}-transgender-alt:before { content: $fa-var-transgender-alt; }
+.#{$fa-css-prefix}-venus-double:before { content: $fa-var-venus-double; }
+.#{$fa-css-prefix}-mars-double:before { content: $fa-var-mars-double; }
+.#{$fa-css-prefix}-venus-mars:before { content: $fa-var-venus-mars; }
+.#{$fa-css-prefix}-mars-stroke:before { content: $fa-var-mars-stroke; }
+.#{$fa-css-prefix}-mars-stroke-v:before { content: $fa-var-mars-stroke-v; }
+.#{$fa-css-prefix}-mars-stroke-h:before { content: $fa-var-mars-stroke-h; }
+.#{$fa-css-prefix}-neuter:before { content: $fa-var-neuter; }
+.#{$fa-css-prefix}-genderless:before { content: $fa-var-genderless; }
+.#{$fa-css-prefix}-facebook-official:before { content: $fa-var-facebook-official; }
+.#{$fa-css-prefix}-pinterest-p:before { content: $fa-var-pinterest-p; }
+.#{$fa-css-prefix}-whatsapp:before { content: $fa-var-whatsapp; }
+.#{$fa-css-prefix}-server:before { content: $fa-var-server; }
+.#{$fa-css-prefix}-user-plus:before { content: $fa-var-user-plus; }
+.#{$fa-css-prefix}-user-times:before { content: $fa-var-user-times; }
+.#{$fa-css-prefix}-hotel:before,
+.#{$fa-css-prefix}-bed:before { content: $fa-var-bed; }
+.#{$fa-css-prefix}-viacoin:before { content: $fa-var-viacoin; }
+.#{$fa-css-prefix}-train:before { content: $fa-var-train; }
+.#{$fa-css-prefix}-subway:before { content: $fa-var-subway; }
+.#{$fa-css-prefix}-medium:before { content: $fa-var-medium; }
+.#{$fa-css-prefix}-yc:before,
+.#{$fa-css-prefix}-y-combinator:before { content: $fa-var-y-combinator; }
+.#{$fa-css-prefix}-optin-monster:before { content: $fa-var-optin-monster; }
+.#{$fa-css-prefix}-opencart:before { content: $fa-var-opencart; }
+.#{$fa-css-prefix}-expeditedssl:before { content: $fa-var-expeditedssl; }
+.#{$fa-css-prefix}-battery-4:before,
+.#{$fa-css-prefix}-battery-full:before { content: $fa-var-battery-full; }
+.#{$fa-css-prefix}-battery-3:before,
+.#{$fa-css-prefix}-battery-three-quarters:before { content: $fa-var-battery-three-quarters; }
+.#{$fa-css-prefix}-battery-2:before,
+.#{$fa-css-prefix}-battery-half:before { content: $fa-var-battery-half; }
+.#{$fa-css-prefix}-battery-1:before,
+.#{$fa-css-prefix}-battery-quarter:before { content: $fa-var-battery-quarter; }
+.#{$fa-css-prefix}-battery-0:before,
+.#{$fa-css-prefix}-battery-empty:before { content: $fa-var-battery-empty; }
+.#{$fa-css-prefix}-mouse-pointer:before { content: $fa-var-mouse-pointer; }
+.#{$fa-css-prefix}-i-cursor:before { content: $fa-var-i-cursor; }
+.#{$fa-css-prefix}-object-group:before { content: $fa-var-object-group; }
+.#{$fa-css-prefix}-object-ungroup:before { content: $fa-var-object-ungroup; }
+.#{$fa-css-prefix}-sticky-note:before { content: $fa-var-sticky-note; }
+.#{$fa-css-prefix}-sticky-note-o:before { content: $fa-var-sticky-note-o; }
+.#{$fa-css-prefix}-cc-jcb:before { content: $fa-var-cc-jcb; }
+.#{$fa-css-prefix}-cc-diners-club:before { content: $fa-var-cc-diners-club; }
+.#{$fa-css-prefix}-clone:before { content: $fa-var-clone; }
+.#{$fa-css-prefix}-balance-scale:before { content: $fa-var-balance-scale; }
+.#{$fa-css-prefix}-hourglass-o:before { content: $fa-var-hourglass-o; }
+.#{$fa-css-prefix}-hourglass-1:before,
+.#{$fa-css-prefix}-hourglass-start:before { content: $fa-var-hourglass-start; }
+.#{$fa-css-prefix}-hourglass-2:before,
+.#{$fa-css-prefix}-hourglass-half:before { content: $fa-var-hourglass-half; }
+.#{$fa-css-prefix}-hourglass-3:before,
+.#{$fa-css-prefix}-hourglass-end:before { content: $fa-var-hourglass-end; }
+.#{$fa-css-prefix}-hourglass:before { content: $fa-var-hourglass; }
+.#{$fa-css-prefix}-hand-grab-o:before,
+.#{$fa-css-prefix}-hand-rock-o:before { content: $fa-var-hand-rock-o; }
+.#{$fa-css-prefix}-hand-stop-o:before,
+.#{$fa-css-prefix}-hand-paper-o:before { content: $fa-var-hand-paper-o; }
+.#{$fa-css-prefix}-hand-scissors-o:before { content: $fa-var-hand-scissors-o; }
+.#{$fa-css-prefix}-hand-lizard-o:before { content: $fa-var-hand-lizard-o; }
+.#{$fa-css-prefix}-hand-spock-o:before { content: $fa-var-hand-spock-o; }
+.#{$fa-css-prefix}-hand-pointer-o:before { content: $fa-var-hand-pointer-o; }
+.#{$fa-css-prefix}-hand-peace-o:before { content: $fa-var-hand-peace-o; }
+.#{$fa-css-prefix}-trademark:before { content: $fa-var-trademark; }
+.#{$fa-css-prefix}-registered:before { content: $fa-var-registered; }
+.#{$fa-css-prefix}-creative-commons:before { content: $fa-var-creative-commons; }
+.#{$fa-css-prefix}-gg:before { content: $fa-var-gg; }
+.#{$fa-css-prefix}-gg-circle:before { content: $fa-var-gg-circle; }
+.#{$fa-css-prefix}-tripadvisor:before { content: $fa-var-tripadvisor; }
+.#{$fa-css-prefix}-odnoklassniki:before { content: $fa-var-odnoklassniki; }
+.#{$fa-css-prefix}-odnoklassniki-square:before { content: $fa-var-odnoklassniki-square; }
+.#{$fa-css-prefix}-get-pocket:before { content: $fa-var-get-pocket; }
+.#{$fa-css-prefix}-wikipedia-w:before { content: $fa-var-wikipedia-w; }
+.#{$fa-css-prefix}-safari:before { content: $fa-var-safari; }
+.#{$fa-css-prefix}-chrome:before { content: $fa-var-chrome; }
+.#{$fa-css-prefix}-firefox:before { content: $fa-var-firefox; }
+.#{$fa-css-prefix}-opera:before { content: $fa-var-opera; }
+.#{$fa-css-prefix}-internet-explorer:before { content: $fa-var-internet-explorer; }
+.#{$fa-css-prefix}-tv:before,
+.#{$fa-css-prefix}-television:before { content: $fa-var-television; }
+.#{$fa-css-prefix}-contao:before { content: $fa-var-contao; }
+.#{$fa-css-prefix}-500px:before { content: $fa-var-500px; }
+.#{$fa-css-prefix}-amazon:before { content: $fa-var-amazon; }
+.#{$fa-css-prefix}-calendar-plus-o:before { content: $fa-var-calendar-plus-o; }
+.#{$fa-css-prefix}-calendar-minus-o:before { content: $fa-var-calendar-minus-o; }
+.#{$fa-css-prefix}-calendar-times-o:before { content: $fa-var-calendar-times-o; }
+.#{$fa-css-prefix}-calendar-check-o:before { content: $fa-var-calendar-check-o; }
+.#{$fa-css-prefix}-industry:before { content: $fa-var-industry; }
+.#{$fa-css-prefix}-map-pin:before { content: $fa-var-map-pin; }
+.#{$fa-css-prefix}-map-signs:before { content: $fa-var-map-signs; }
+.#{$fa-css-prefix}-map-o:before { content: $fa-var-map-o; }
+.#{$fa-css-prefix}-map:before { content: $fa-var-map; }
+.#{$fa-css-prefix}-commenting:before { content: $fa-var-commenting; }
+.#{$fa-css-prefix}-commenting-o:before { content: $fa-var-commenting-o; }
+.#{$fa-css-prefix}-houzz:before { content: $fa-var-houzz; }
+.#{$fa-css-prefix}-vimeo:before { content: $fa-var-vimeo; }
+.#{$fa-css-prefix}-black-tie:before { content: $fa-var-black-tie; }
+.#{$fa-css-prefix}-fonticons:before { content: $fa-var-fonticons; }
+.#{$fa-css-prefix}-reddit-alien:before { content: $fa-var-reddit-alien; }
+.#{$fa-css-prefix}-edge:before { content: $fa-var-edge; }
+.#{$fa-css-prefix}-credit-card-alt:before { content: $fa-var-credit-card-alt; }
+.#{$fa-css-prefix}-codiepie:before { content: $fa-var-codiepie; }
+.#{$fa-css-prefix}-modx:before { content: $fa-var-modx; }
+.#{$fa-css-prefix}-fort-awesome:before { content: $fa-var-fort-awesome; }
+.#{$fa-css-prefix}-usb:before { content: $fa-var-usb; }
+.#{$fa-css-prefix}-product-hunt:before { content: $fa-var-product-hunt; }
+.#{$fa-css-prefix}-mixcloud:before { content: $fa-var-mixcloud; }
+.#{$fa-css-prefix}-scribd:before { content: $fa-var-scribd; }
+.#{$fa-css-prefix}-pause-circle:before { content: $fa-var-pause-circle; }
+.#{$fa-css-prefix}-pause-circle-o:before { content: $fa-var-pause-circle-o; }
+.#{$fa-css-prefix}-stop-circle:before { content: $fa-var-stop-circle; }
+.#{$fa-css-prefix}-stop-circle-o:before { content: $fa-var-stop-circle-o; }
+.#{$fa-css-prefix}-shopping-bag:before { content: $fa-var-shopping-bag; }
+.#{$fa-css-prefix}-shopping-basket:before { content: $fa-var-shopping-basket; }
+.#{$fa-css-prefix}-hashtag:before { content: $fa-var-hashtag; }
+.#{$fa-css-prefix}-bluetooth:before { content: $fa-var-bluetooth; }
+.#{$fa-css-prefix}-bluetooth-b:before { content: $fa-var-bluetooth-b; }
+.#{$fa-css-prefix}-percent:before { content: $fa-var-percent; }
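+// Note: the rules above only pair each .fa-* class with a $fa-var-* codepoint
+// (declared in _variables.scss); the glyph itself renders because the base
+// .fa class (defined upstream in _core.scss) applies the FontAwesome font to
+// the element. Typical markup: <i class="fa fa-tags"></i>.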
diff --git a/asset/static/fonts/scss/_larger.scss b/asset/static/fonts/scss/_larger.scss
new file mode 100755
index 0000000..41e9a81
--- /dev/null
+++ b/asset/static/fonts/scss/_larger.scss
@@ -0,0 +1,13 @@
+// Icon Sizes
+// -------------------------
+
+/* makes the font 33% larger relative to the icon container */
+.#{$fa-css-prefix}-lg {
+ font-size: (4em / 3);
+ line-height: (3em / 4);
+ vertical-align: -15%;
+}
+.#{$fa-css-prefix}-2x { font-size: 2em; }
+.#{$fa-css-prefix}-3x { font-size: 3em; }
+.#{$fa-css-prefix}-4x { font-size: 4em; }
+.#{$fa-css-prefix}-5x { font-size: 5em; }
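+// Note: fa-lg enlarges the glyph by ~33% (4em/3) while the 3em/4 line-height
+// keeps the resulting line box at 1em (4/3 x 3/4 = 1), so inline icons do not
+// disturb surrounding text. The fixed -2x..-5x sizes scale with the parent
+// font size, e.g. <i class="fa fa-camera fa-3x"></i>.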
diff --git a/asset/static/fonts/scss/_list.scss b/asset/static/fonts/scss/_list.scss
new file mode 100755
index 0000000..7d1e4d5
--- /dev/null
+++ b/asset/static/fonts/scss/_list.scss
@@ -0,0 +1,19 @@
+// List Icons
+// -------------------------
+
+.#{$fa-css-prefix}-ul {
+ padding-left: 0;
+ margin-left: $fa-li-width;
+ list-style-type: none;
+ > li { position: relative; }
+}
+.#{$fa-css-prefix}-li {
+ position: absolute;
+ left: -$fa-li-width;
+ width: $fa-li-width;
+ top: (2em / 14);
+ text-align: center;
+ &.#{$fa-css-prefix}-lg {
+ left: -$fa-li-width + (4em / 14);
+ }
+}
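+// Note: fa-ul suppresses the default bullets and reserves a $fa-li-width
+// gutter; fa-li then absolutely positions each icon inside that gutter, e.g.
+// <ul class="fa-ul"><li><i class="fa fa-li fa-check"></i>item</li></ul>.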
diff --git a/asset/static/fonts/scss/_mixins.scss b/asset/static/fonts/scss/_mixins.scss
new file mode 100755
index 0000000..f96719b
--- /dev/null
+++ b/asset/static/fonts/scss/_mixins.scss
@@ -0,0 +1,26 @@
+// Mixins
+// --------------------------
+
+@mixin fa-icon() {
+ display: inline-block;
+ font: normal normal normal #{$fa-font-size-base}/#{$fa-line-height-base} FontAwesome; // shortening font declaration
+ font-size: inherit; // can't have font-size inherit on line above, so need to override
+ text-rendering: auto; // optimizelegibility throws things off #1094
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+
+@mixin fa-icon-rotate($degrees, $rotation) {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});
+ -webkit-transform: rotate($degrees);
+ -ms-transform: rotate($degrees);
+ transform: rotate($degrees);
+}
+
+@mixin fa-icon-flip($horiz, $vert, $rotation) {
+ filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation});
+ -webkit-transform: scale($horiz, $vert);
+ -ms-transform: scale($horiz, $vert);
+ transform: scale($horiz, $vert);
+}
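+// Usage sketch (illustrative; not part of the upstream file): any custom
+// selector can reuse fa-icon() together with an escaped codepoint from
+// _variables.scss, for example a hypothetical save button for this wiki:
+//
+//   .wiki-save-icon:before {
+//     @include fa-icon();
+//     content: $fa-var-floppy-o;
+//   }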
diff --git a/asset/static/fonts/scss/_path.scss b/asset/static/fonts/scss/_path.scss
new file mode 100755
index 0000000..bb457c2
--- /dev/null
+++ b/asset/static/fonts/scss/_path.scss
@@ -0,0 +1,15 @@
+/* FONT PATH
+ * -------------------------- */
+
+@font-face {
+ font-family: 'FontAwesome';
+ src: url('#{$fa-font-path}/fontawesome-webfont.eot?v=#{$fa-version}');
+ src: url('#{$fa-font-path}/fontawesome-webfont.eot?#iefix&v=#{$fa-version}') format('embedded-opentype'),
+ url('#{$fa-font-path}/fontawesome-webfont.woff2?v=#{$fa-version}') format('woff2'),
+ url('#{$fa-font-path}/fontawesome-webfont.woff?v=#{$fa-version}') format('woff'),
+ url('#{$fa-font-path}/fontawesome-webfont.ttf?v=#{$fa-version}') format('truetype'),
+ url('#{$fa-font-path}/fontawesome-webfont.svg?v=#{$fa-version}#fontawesomeregular') format('svg');
+// src: url('#{$fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
+ font-weight: normal;
+ font-style: normal;
+}
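+// Note: $fa-font-path and $fa-version are declared with !default in
+// _variables.scss, so a consumer can point at self-hosted fonts by assigning
+// before the import, e.g. $fa-font-path: "/static/fonts"; (path illustrative).
+// The ?v=#{$fa-version} query string simply busts caches across upgrades.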
diff --git a/asset/static/fonts/scss/_rotated-flipped.scss b/asset/static/fonts/scss/_rotated-flipped.scss
new file mode 100755
index 0000000..a3558fd
--- /dev/null
+++ b/asset/static/fonts/scss/_rotated-flipped.scss
@@ -0,0 +1,20 @@
+// Rotated & Flipped Icons
+// -------------------------
+
+.#{$fa-css-prefix}-rotate-90 { @include fa-icon-rotate(90deg, 1); }
+.#{$fa-css-prefix}-rotate-180 { @include fa-icon-rotate(180deg, 2); }
+.#{$fa-css-prefix}-rotate-270 { @include fa-icon-rotate(270deg, 3); }
+
+.#{$fa-css-prefix}-flip-horizontal { @include fa-icon-flip(-1, 1, 0); }
+.#{$fa-css-prefix}-flip-vertical { @include fa-icon-flip(1, -1, 2); }
+
+// Hook for IE8-9
+// -------------------------
+
+:root .#{$fa-css-prefix}-rotate-90,
+:root .#{$fa-css-prefix}-rotate-180,
+:root .#{$fa-css-prefix}-rotate-270,
+:root .#{$fa-css-prefix}-flip-horizontal,
+:root .#{$fa-css-prefix}-flip-vertical {
+ filter: none;
+}
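+// Note: IE8 cannot match the :root selector, so the filter: none reset above
+// only takes effect in IE9+ and modern browsers, which already rotate/flip
+// via transform; the DXImageTransform filter fallback stays active in IE8
+// alone.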
diff --git a/asset/static/fonts/scss/_spinning.scss b/asset/static/fonts/scss/_spinning.scss
new file mode 100755
index 0000000..002c5d5
--- /dev/null
+++ b/asset/static/fonts/scss/_spinning.scss
@@ -0,0 +1,29 @@
+// Spinning Icons
+// --------------------------
+
+.#{$fa-css-prefix}-spin {
+ -webkit-animation: fa-spin 2s infinite linear;
+ animation: fa-spin 2s infinite linear;
+}
+
+@-webkit-keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
+
+@keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg);
+ }
+ 100% {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg);
+ }
+}
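+// Usage note: fa-spin is combined with an icon class to build a loading
+// indicator, typically <i class="fa fa-spinner fa-spin"></i> or
+// <i class="fa fa-circle-o-notch fa-spin"></i>.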
diff --git a/asset/static/fonts/scss/_stacked.scss b/asset/static/fonts/scss/_stacked.scss
new file mode 100755
index 0000000..aef7403
--- /dev/null
+++ b/asset/static/fonts/scss/_stacked.scss
@@ -0,0 +1,20 @@
+// Stacked Icons
+// -------------------------
+
+.#{$fa-css-prefix}-stack {
+ position: relative;
+ display: inline-block;
+ width: 2em;
+ height: 2em;
+ line-height: 2em;
+ vertical-align: middle;
+}
+.#{$fa-css-prefix}-stack-1x, .#{$fa-css-prefix}-stack-2x {
+ position: absolute;
+ left: 0;
+ width: 100%;
+ text-align: center;
+}
+.#{$fa-css-prefix}-stack-1x { line-height: inherit; }
+.#{$fa-css-prefix}-stack-2x { font-size: 2em; }
+.#{$fa-css-prefix}-inverse { color: $fa-inverse; }
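+// Usage sketch (illustrative): stack a 1x glyph on a 2x frame, optionally
+// inverting the inner icon's color, e.g.
+// <span class="fa-stack fa-lg">
+//   <i class="fa fa-square-o fa-stack-2x"></i>
+//   <i class="fa fa-terminal fa-stack-1x"></i>
+// </span>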
diff --git a/asset/static/fonts/scss/_variables.scss b/asset/static/fonts/scss/_variables.scss
new file mode 100755
index 0000000..0a47110
--- /dev/null
+++ b/asset/static/fonts/scss/_variables.scss
@@ -0,0 +1,708 @@
+// Variables
+// --------------------------
+
+$fa-font-path: "../fonts" !default;
+$fa-font-size-base: 14px !default;
+$fa-line-height-base: 1 !default;
+//$fa-font-path: "//netdna.bootstrapcdn.com/font-awesome/4.5.0/fonts" !default; // for referencing Bootstrap CDN font files directly
+$fa-css-prefix: fa !default;
+$fa-version: "4.5.0" !default;
+$fa-border-color: #eee !default;
+$fa-inverse: #fff !default;
+$fa-li-width: (30em / 14) !default;
+
+$fa-var-500px: "\f26e";
+$fa-var-adjust: "\f042";
+$fa-var-adn: "\f170";
+$fa-var-align-center: "\f037";
+$fa-var-align-justify: "\f039";
+$fa-var-align-left: "\f036";
+$fa-var-align-right: "\f038";
+$fa-var-amazon: "\f270";
+$fa-var-ambulance: "\f0f9";
+$fa-var-anchor: "\f13d";
+$fa-var-android: "\f17b";
+$fa-var-angellist: "\f209";
+$fa-var-angle-double-down: "\f103";
+$fa-var-angle-double-left: "\f100";
+$fa-var-angle-double-right: "\f101";
+$fa-var-angle-double-up: "\f102";
+$fa-var-angle-down: "\f107";
+$fa-var-angle-left: "\f104";
+$fa-var-angle-right: "\f105";
+$fa-var-angle-up: "\f106";
+$fa-var-apple: "\f179";
+$fa-var-archive: "\f187";
+$fa-var-area-chart: "\f1fe";
+$fa-var-arrow-circle-down: "\f0ab";
+$fa-var-arrow-circle-left: "\f0a8";
+$fa-var-arrow-circle-o-down: "\f01a";
+$fa-var-arrow-circle-o-left: "\f190";
+$fa-var-arrow-circle-o-right: "\f18e";
+$fa-var-arrow-circle-o-up: "\f01b";
+$fa-var-arrow-circle-right: "\f0a9";
+$fa-var-arrow-circle-up: "\f0aa";
+$fa-var-arrow-down: "\f063";
+$fa-var-arrow-left: "\f060";
+$fa-var-arrow-right: "\f061";
+$fa-var-arrow-up: "\f062";
+$fa-var-arrows: "\f047";
+$fa-var-arrows-alt: "\f0b2";
+$fa-var-arrows-h: "\f07e";
+$fa-var-arrows-v: "\f07d";
+$fa-var-asterisk: "\f069";
+$fa-var-at: "\f1fa";
+$fa-var-automobile: "\f1b9";
+$fa-var-backward: "\f04a";
+$fa-var-balance-scale: "\f24e";
+$fa-var-ban: "\f05e";
+$fa-var-bank: "\f19c";
+$fa-var-bar-chart: "\f080";
+$fa-var-bar-chart-o: "\f080";
+$fa-var-barcode: "\f02a";
+$fa-var-bars: "\f0c9";
+$fa-var-battery-0: "\f244";
+$fa-var-battery-1: "\f243";
+$fa-var-battery-2: "\f242";
+$fa-var-battery-3: "\f241";
+$fa-var-battery-4: "\f240";
+$fa-var-battery-empty: "\f244";
+$fa-var-battery-full: "\f240";
+$fa-var-battery-half: "\f242";
+$fa-var-battery-quarter: "\f243";
+$fa-var-battery-three-quarters: "\f241";
+$fa-var-bed: "\f236";
+$fa-var-beer: "\f0fc";
+$fa-var-behance: "\f1b4";
+$fa-var-behance-square: "\f1b5";
+$fa-var-bell: "\f0f3";
+$fa-var-bell-o: "\f0a2";
+$fa-var-bell-slash: "\f1f6";
+$fa-var-bell-slash-o: "\f1f7";
+$fa-var-bicycle: "\f206";
+$fa-var-binoculars: "\f1e5";
+$fa-var-birthday-cake: "\f1fd";
+$fa-var-bitbucket: "\f171";
+$fa-var-bitbucket-square: "\f172";
+$fa-var-bitcoin: "\f15a";
+$fa-var-black-tie: "\f27e";
+$fa-var-bluetooth: "\f293";
+$fa-var-bluetooth-b: "\f294";
+$fa-var-bold: "\f032";
+$fa-var-bolt: "\f0e7";
+$fa-var-bomb: "\f1e2";
+$fa-var-book: "\f02d";
+$fa-var-bookmark: "\f02e";
+$fa-var-bookmark-o: "\f097";
+$fa-var-briefcase: "\f0b1";
+$fa-var-btc: "\f15a";
+$fa-var-bug: "\f188";
+$fa-var-building: "\f1ad";
+$fa-var-building-o: "\f0f7";
+$fa-var-bullhorn: "\f0a1";
+$fa-var-bullseye: "\f140";
+$fa-var-bus: "\f207";
+$fa-var-buysellads: "\f20d";
+$fa-var-cab: "\f1ba";
+$fa-var-calculator: "\f1ec";
+$fa-var-calendar: "\f073";
+$fa-var-calendar-check-o: "\f274";
+$fa-var-calendar-minus-o: "\f272";
+$fa-var-calendar-o: "\f133";
+$fa-var-calendar-plus-o: "\f271";
+$fa-var-calendar-times-o: "\f273";
+$fa-var-camera: "\f030";
+$fa-var-camera-retro: "\f083";
+$fa-var-car: "\f1b9";
+$fa-var-caret-down: "\f0d7";
+$fa-var-caret-left: "\f0d9";
+$fa-var-caret-right: "\f0da";
+$fa-var-caret-square-o-down: "\f150";
+$fa-var-caret-square-o-left: "\f191";
+$fa-var-caret-square-o-right: "\f152";
+$fa-var-caret-square-o-up: "\f151";
+$fa-var-caret-up: "\f0d8";
+$fa-var-cart-arrow-down: "\f218";
+$fa-var-cart-plus: "\f217";
+$fa-var-cc: "\f20a";
+$fa-var-cc-amex: "\f1f3";
+$fa-var-cc-diners-club: "\f24c";
+$fa-var-cc-discover: "\f1f2";
+$fa-var-cc-jcb: "\f24b";
+$fa-var-cc-mastercard: "\f1f1";
+$fa-var-cc-paypal: "\f1f4";
+$fa-var-cc-stripe: "\f1f5";
+$fa-var-cc-visa: "\f1f0";
+$fa-var-certificate: "\f0a3";
+$fa-var-chain: "\f0c1";
+$fa-var-chain-broken: "\f127";
+$fa-var-check: "\f00c";
+$fa-var-check-circle: "\f058";
+$fa-var-check-circle-o: "\f05d";
+$fa-var-check-square: "\f14a";
+$fa-var-check-square-o: "\f046";
+$fa-var-chevron-circle-down: "\f13a";
+$fa-var-chevron-circle-left: "\f137";
+$fa-var-chevron-circle-right: "\f138";
+$fa-var-chevron-circle-up: "\f139";
+$fa-var-chevron-down: "\f078";
+$fa-var-chevron-left: "\f053";
+$fa-var-chevron-right: "\f054";
+$fa-var-chevron-up: "\f077";
+$fa-var-child: "\f1ae";
+$fa-var-chrome: "\f268";
+$fa-var-circle: "\f111";
+$fa-var-circle-o: "\f10c";
+$fa-var-circle-o-notch: "\f1ce";
+$fa-var-circle-thin: "\f1db";
+$fa-var-clipboard: "\f0ea";
+$fa-var-clock-o: "\f017";
+$fa-var-clone: "\f24d";
+$fa-var-close: "\f00d";
+$fa-var-cloud: "\f0c2";
+$fa-var-cloud-download: "\f0ed";
+$fa-var-cloud-upload: "\f0ee";
+$fa-var-cny: "\f157";
+$fa-var-code: "\f121";
+$fa-var-code-fork: "\f126";
+$fa-var-codepen: "\f1cb";
+$fa-var-codiepie: "\f284";
+$fa-var-coffee: "\f0f4";
+$fa-var-cog: "\f013";
+$fa-var-cogs: "\f085";
+$fa-var-columns: "\f0db";
+$fa-var-comment: "\f075";
+$fa-var-comment-o: "\f0e5";
+$fa-var-commenting: "\f27a";
+$fa-var-commenting-o: "\f27b";
+$fa-var-comments: "\f086";
+$fa-var-comments-o: "\f0e6";
+$fa-var-compass: "\f14e";
+$fa-var-compress: "\f066";
+$fa-var-connectdevelop: "\f20e";
+$fa-var-contao: "\f26d";
+$fa-var-copy: "\f0c5";
+$fa-var-copyright: "\f1f9";
+$fa-var-creative-commons: "\f25e";
+$fa-var-credit-card: "\f09d";
+$fa-var-credit-card-alt: "\f283";
+$fa-var-crop: "\f125";
+$fa-var-crosshairs: "\f05b";
+$fa-var-css3: "\f13c";
+$fa-var-cube: "\f1b2";
+$fa-var-cubes: "\f1b3";
+$fa-var-cut: "\f0c4";
+$fa-var-cutlery: "\f0f5";
+$fa-var-dashboard: "\f0e4";
+$fa-var-dashcube: "\f210";
+$fa-var-database: "\f1c0";
+$fa-var-dedent: "\f03b";
+$fa-var-delicious: "\f1a5";
+$fa-var-desktop: "\f108";
+$fa-var-deviantart: "\f1bd";
+$fa-var-diamond: "\f219";
+$fa-var-digg: "\f1a6";
+$fa-var-dollar: "\f155";
+$fa-var-dot-circle-o: "\f192";
+$fa-var-download: "\f019";
+$fa-var-dribbble: "\f17d";
+$fa-var-dropbox: "\f16b";
+$fa-var-drupal: "\f1a9";
+$fa-var-edge: "\f282";
+$fa-var-edit: "\f044";
+$fa-var-eject: "\f052";
+$fa-var-ellipsis-h: "\f141";
+$fa-var-ellipsis-v: "\f142";
+$fa-var-empire: "\f1d1";
+$fa-var-envelope: "\f0e0";
+$fa-var-envelope-o: "\f003";
+$fa-var-envelope-square: "\f199";
+$fa-var-eraser: "\f12d";
+$fa-var-eur: "\f153";
+$fa-var-euro: "\f153";
+$fa-var-exchange: "\f0ec";
+$fa-var-exclamation: "\f12a";
+$fa-var-exclamation-circle: "\f06a";
+$fa-var-exclamation-triangle: "\f071";
+$fa-var-expand: "\f065";
+$fa-var-expeditedssl: "\f23e";
+$fa-var-external-link: "\f08e";
+$fa-var-external-link-square: "\f14c";
+$fa-var-eye: "\f06e";
+$fa-var-eye-slash: "\f070";
+$fa-var-eyedropper: "\f1fb";
+$fa-var-facebook: "\f09a";
+$fa-var-facebook-f: "\f09a";
+$fa-var-facebook-official: "\f230";
+$fa-var-facebook-square: "\f082";
+$fa-var-fast-backward: "\f049";
+$fa-var-fast-forward: "\f050";
+$fa-var-fax: "\f1ac";
+$fa-var-feed: "\f09e";
+$fa-var-female: "\f182";
+$fa-var-fighter-jet: "\f0fb";
+$fa-var-file: "\f15b";
+$fa-var-file-archive-o: "\f1c6";
+$fa-var-file-audio-o: "\f1c7";
+$fa-var-file-code-o: "\f1c9";
+$fa-var-file-excel-o: "\f1c3";
+$fa-var-file-image-o: "\f1c5";
+$fa-var-file-movie-o: "\f1c8";
+$fa-var-file-o: "\f016";
+$fa-var-file-pdf-o: "\f1c1";
+$fa-var-file-photo-o: "\f1c5";
+$fa-var-file-picture-o: "\f1c5";
+$fa-var-file-powerpoint-o: "\f1c4";
+$fa-var-file-sound-o: "\f1c7";
+$fa-var-file-text: "\f15c";
+$fa-var-file-text-o: "\f0f6";
+$fa-var-file-video-o: "\f1c8";
+$fa-var-file-word-o: "\f1c2";
+$fa-var-file-zip-o: "\f1c6";
+$fa-var-files-o: "\f0c5";
+$fa-var-film: "\f008";
+$fa-var-filter: "\f0b0";
+$fa-var-fire: "\f06d";
+$fa-var-fire-extinguisher: "\f134";
+$fa-var-firefox: "\f269";
+$fa-var-flag: "\f024";
+$fa-var-flag-checkered: "\f11e";
+$fa-var-flag-o: "\f11d";
+$fa-var-flash: "\f0e7";
+$fa-var-flask: "\f0c3";
+$fa-var-flickr: "\f16e";
+$fa-var-floppy-o: "\f0c7";
+$fa-var-folder: "\f07b";
+$fa-var-folder-o: "\f114";
+$fa-var-folder-open: "\f07c";
+$fa-var-folder-open-o: "\f115";
+$fa-var-font: "\f031";
+$fa-var-fonticons: "\f280";
+$fa-var-fort-awesome: "\f286";
+$fa-var-forumbee: "\f211";
+$fa-var-forward: "\f04e";
+$fa-var-foursquare: "\f180";
+$fa-var-frown-o: "\f119";
+$fa-var-futbol-o: "\f1e3";
+$fa-var-gamepad: "\f11b";
+$fa-var-gavel: "\f0e3";
+$fa-var-gbp: "\f154";
+$fa-var-ge: "\f1d1";
+$fa-var-gear: "\f013";
+$fa-var-gears: "\f085";
+$fa-var-genderless: "\f22d";
+$fa-var-get-pocket: "\f265";
+$fa-var-gg: "\f260";
+$fa-var-gg-circle: "\f261";
+$fa-var-gift: "\f06b";
+$fa-var-git: "\f1d3";
+$fa-var-git-square: "\f1d2";
+$fa-var-github: "\f09b";
+$fa-var-github-alt: "\f113";
+$fa-var-github-square: "\f092";
+$fa-var-gittip: "\f184";
+$fa-var-glass: "\f000";
+$fa-var-globe: "\f0ac";
+$fa-var-google: "\f1a0";
+$fa-var-google-plus: "\f0d5";
+$fa-var-google-plus-square: "\f0d4";
+$fa-var-google-wallet: "\f1ee";
+$fa-var-graduation-cap: "\f19d";
+$fa-var-gratipay: "\f184";
+$fa-var-group: "\f0c0";
+$fa-var-h-square: "\f0fd";
+$fa-var-hacker-news: "\f1d4";
+$fa-var-hand-grab-o: "\f255";
+$fa-var-hand-lizard-o: "\f258";
+$fa-var-hand-o-down: "\f0a7";
+$fa-var-hand-o-left: "\f0a5";
+$fa-var-hand-o-right: "\f0a4";
+$fa-var-hand-o-up: "\f0a6";
+$fa-var-hand-paper-o: "\f256";
+$fa-var-hand-peace-o: "\f25b";
+$fa-var-hand-pointer-o: "\f25a";
+$fa-var-hand-rock-o: "\f255";
+$fa-var-hand-scissors-o: "\f257";
+$fa-var-hand-spock-o: "\f259";
+$fa-var-hand-stop-o: "\f256";
+$fa-var-hashtag: "\f292";
+$fa-var-hdd-o: "\f0a0";
+$fa-var-header: "\f1dc";
+$fa-var-headphones: "\f025";
+$fa-var-heart: "\f004";
+$fa-var-heart-o: "\f08a";
+$fa-var-heartbeat: "\f21e";
+$fa-var-history: "\f1da";
+$fa-var-home: "\f015";
+$fa-var-hospital-o: "\f0f8";
+$fa-var-hotel: "\f236";
+$fa-var-hourglass: "\f254";
+$fa-var-hourglass-1: "\f251";
+$fa-var-hourglass-2: "\f252";
+$fa-var-hourglass-3: "\f253";
+$fa-var-hourglass-end: "\f253";
+$fa-var-hourglass-half: "\f252";
+$fa-var-hourglass-o: "\f250";
+$fa-var-hourglass-start: "\f251";
+$fa-var-houzz: "\f27c";
+$fa-var-html5: "\f13b";
+$fa-var-i-cursor: "\f246";
+$fa-var-ils: "\f20b";
+$fa-var-image: "\f03e";
+$fa-var-inbox: "\f01c";
+$fa-var-indent: "\f03c";
+$fa-var-industry: "\f275";
+$fa-var-info: "\f129";
+$fa-var-info-circle: "\f05a";
+$fa-var-inr: "\f156";
+$fa-var-instagram: "\f16d";
+$fa-var-institution: "\f19c";
+$fa-var-internet-explorer: "\f26b";
+$fa-var-intersex: "\f224";
+$fa-var-ioxhost: "\f208";
+$fa-var-italic: "\f033";
+$fa-var-joomla: "\f1aa";
+$fa-var-jpy: "\f157";
+$fa-var-jsfiddle: "\f1cc";
+$fa-var-key: "\f084";
+$fa-var-keyboard-o: "\f11c";
+$fa-var-krw: "\f159";
+$fa-var-language: "\f1ab";
+$fa-var-laptop: "\f109";
+$fa-var-lastfm: "\f202";
+$fa-var-lastfm-square: "\f203";
+$fa-var-leaf: "\f06c";
+$fa-var-leanpub: "\f212";
+$fa-var-legal: "\f0e3";
+$fa-var-lemon-o: "\f094";
+$fa-var-level-down: "\f149";
+$fa-var-level-up: "\f148";
+$fa-var-life-bouy: "\f1cd";
+$fa-var-life-buoy: "\f1cd";
+$fa-var-life-ring: "\f1cd";
+$fa-var-life-saver: "\f1cd";
+$fa-var-lightbulb-o: "\f0eb";
+$fa-var-line-chart: "\f201";
+$fa-var-link: "\f0c1";
+$fa-var-linkedin: "\f0e1";
+$fa-var-linkedin-square: "\f08c";
+$fa-var-linux: "\f17c";
+$fa-var-list: "\f03a";
+$fa-var-list-alt: "\f022";
+$fa-var-list-ol: "\f0cb";
+$fa-var-list-ul: "\f0ca";
+$fa-var-location-arrow: "\f124";
+$fa-var-lock: "\f023";
+$fa-var-long-arrow-down: "\f175";
+$fa-var-long-arrow-left: "\f177";
+$fa-var-long-arrow-right: "\f178";
+$fa-var-long-arrow-up: "\f176";
+$fa-var-magic: "\f0d0";
+$fa-var-magnet: "\f076";
+$fa-var-mail-forward: "\f064";
+$fa-var-mail-reply: "\f112";
+$fa-var-mail-reply-all: "\f122";
+$fa-var-male: "\f183";
+$fa-var-map: "\f279";
+$fa-var-map-marker: "\f041";
+$fa-var-map-o: "\f278";
+$fa-var-map-pin: "\f276";
+$fa-var-map-signs: "\f277";
+$fa-var-mars: "\f222";
+$fa-var-mars-double: "\f227";
+$fa-var-mars-stroke: "\f229";
+$fa-var-mars-stroke-h: "\f22b";
+$fa-var-mars-stroke-v: "\f22a";
+$fa-var-maxcdn: "\f136";
+$fa-var-meanpath: "\f20c";
+$fa-var-medium: "\f23a";
+$fa-var-medkit: "\f0fa";
+$fa-var-meh-o: "\f11a";
+$fa-var-mercury: "\f223";
+$fa-var-microphone: "\f130";
+$fa-var-microphone-slash: "\f131";
+$fa-var-minus: "\f068";
+$fa-var-minus-circle: "\f056";
+$fa-var-minus-square: "\f146";
+$fa-var-minus-square-o: "\f147";
+$fa-var-mixcloud: "\f289";
+$fa-var-mobile: "\f10b";
+$fa-var-mobile-phone: "\f10b";
+$fa-var-modx: "\f285";
+$fa-var-money: "\f0d6";
+$fa-var-moon-o: "\f186";
+$fa-var-mortar-board: "\f19d";
+$fa-var-motorcycle: "\f21c";
+$fa-var-mouse-pointer: "\f245";
+$fa-var-music: "\f001";
+$fa-var-navicon: "\f0c9";
+$fa-var-neuter: "\f22c";
+$fa-var-newspaper-o: "\f1ea";
+$fa-var-object-group: "\f247";
+$fa-var-object-ungroup: "\f248";
+$fa-var-odnoklassniki: "\f263";
+$fa-var-odnoklassniki-square: "\f264";
+$fa-var-opencart: "\f23d";
+$fa-var-openid: "\f19b";
+$fa-var-opera: "\f26a";
+$fa-var-optin-monster: "\f23c";
+$fa-var-outdent: "\f03b";
+$fa-var-pagelines: "\f18c";
+$fa-var-paint-brush: "\f1fc";
+$fa-var-paper-plane: "\f1d8";
+$fa-var-paper-plane-o: "\f1d9";
+$fa-var-paperclip: "\f0c6";
+$fa-var-paragraph: "\f1dd";
+$fa-var-paste: "\f0ea";
+$fa-var-pause: "\f04c";
+$fa-var-pause-circle: "\f28b";
+$fa-var-pause-circle-o: "\f28c";
+$fa-var-paw: "\f1b0";
+$fa-var-paypal: "\f1ed";
+$fa-var-pencil: "\f040";
+$fa-var-pencil-square: "\f14b";
+$fa-var-pencil-square-o: "\f044";
+$fa-var-percent: "\f295";
+$fa-var-phone: "\f095";
+$fa-var-phone-square: "\f098";
+$fa-var-photo: "\f03e";
+$fa-var-picture-o: "\f03e";
+$fa-var-pie-chart: "\f200";
+$fa-var-pied-piper: "\f1a7";
+$fa-var-pied-piper-alt: "\f1a8";
+$fa-var-pinterest: "\f0d2";
+$fa-var-pinterest-p: "\f231";
+$fa-var-pinterest-square: "\f0d3";
+$fa-var-plane: "\f072";
+$fa-var-play: "\f04b";
+$fa-var-play-circle: "\f144";
+$fa-var-play-circle-o: "\f01d";
+$fa-var-plug: "\f1e6";
+$fa-var-plus: "\f067";
+$fa-var-plus-circle: "\f055";
+$fa-var-plus-square: "\f0fe";
+$fa-var-plus-square-o: "\f196";
+$fa-var-power-off: "\f011";
+$fa-var-print: "\f02f";
+$fa-var-product-hunt: "\f288";
+$fa-var-puzzle-piece: "\f12e";
+$fa-var-qq: "\f1d6";
+$fa-var-qrcode: "\f029";
+$fa-var-question: "\f128";
+$fa-var-question-circle: "\f059";
+$fa-var-quote-left: "\f10d";
+$fa-var-quote-right: "\f10e";
+$fa-var-ra: "\f1d0";
+$fa-var-random: "\f074";
+$fa-var-rebel: "\f1d0";
+$fa-var-recycle: "\f1b8";
+$fa-var-reddit: "\f1a1";
+$fa-var-reddit-alien: "\f281";
+$fa-var-reddit-square: "\f1a2";
+$fa-var-refresh: "\f021";
+$fa-var-registered: "\f25d";
+$fa-var-remove: "\f00d";
+$fa-var-renren: "\f18b";
+$fa-var-reorder: "\f0c9";
+$fa-var-repeat: "\f01e";
+$fa-var-reply: "\f112";
+$fa-var-reply-all: "\f122";
+$fa-var-retweet: "\f079";
+$fa-var-rmb: "\f157";
+$fa-var-road: "\f018";
+$fa-var-rocket: "\f135";
+$fa-var-rotate-left: "\f0e2";
+$fa-var-rotate-right: "\f01e";
+$fa-var-rouble: "\f158";
+$fa-var-rss: "\f09e";
+$fa-var-rss-square: "\f143";
+$fa-var-rub: "\f158";
+$fa-var-ruble: "\f158";
+$fa-var-rupee: "\f156";
+$fa-var-safari: "\f267";
+$fa-var-save: "\f0c7";
+$fa-var-scissors: "\f0c4";
+$fa-var-scribd: "\f28a";
+$fa-var-search: "\f002";
+$fa-var-search-minus: "\f010";
+$fa-var-search-plus: "\f00e";
+$fa-var-sellsy: "\f213";
+$fa-var-send: "\f1d8";
+$fa-var-send-o: "\f1d9";
+$fa-var-server: "\f233";
+$fa-var-share: "\f064";
+$fa-var-share-alt: "\f1e0";
+$fa-var-share-alt-square: "\f1e1";
+$fa-var-share-square: "\f14d";
+$fa-var-share-square-o: "\f045";
+$fa-var-shekel: "\f20b";
+$fa-var-sheqel: "\f20b";
+$fa-var-shield: "\f132";
+$fa-var-ship: "\f21a";
+$fa-var-shirtsinbulk: "\f214";
+$fa-var-shopping-bag: "\f290";
+$fa-var-shopping-basket: "\f291";
+$fa-var-shopping-cart: "\f07a";
+$fa-var-sign-in: "\f090";
+$fa-var-sign-out: "\f08b";
+$fa-var-signal: "\f012";
+$fa-var-simplybuilt: "\f215";
+$fa-var-sitemap: "\f0e8";
+$fa-var-skyatlas: "\f216";
+$fa-var-skype: "\f17e";
+$fa-var-slack: "\f198";
+$fa-var-sliders: "\f1de";
+$fa-var-slideshare: "\f1e7";
+$fa-var-smile-o: "\f118";
+$fa-var-soccer-ball-o: "\f1e3";
+$fa-var-sort: "\f0dc";
+$fa-var-sort-alpha-asc: "\f15d";
+$fa-var-sort-alpha-desc: "\f15e";
+$fa-var-sort-amount-asc: "\f160";
+$fa-var-sort-amount-desc: "\f161";
+$fa-var-sort-asc: "\f0de";
+$fa-var-sort-desc: "\f0dd";
+$fa-var-sort-down: "\f0dd";
+$fa-var-sort-numeric-asc: "\f162";
+$fa-var-sort-numeric-desc: "\f163";
+$fa-var-sort-up: "\f0de";
+$fa-var-soundcloud: "\f1be";
+$fa-var-space-shuttle: "\f197";
+$fa-var-spinner: "\f110";
+$fa-var-spoon: "\f1b1";
+$fa-var-spotify: "\f1bc";
+$fa-var-square: "\f0c8";
+$fa-var-square-o: "\f096";
+$fa-var-stack-exchange: "\f18d";
+$fa-var-stack-overflow: "\f16c";
+$fa-var-star: "\f005";
+$fa-var-star-half: "\f089";
+$fa-var-star-half-empty: "\f123";
+$fa-var-star-half-full: "\f123";
+$fa-var-star-half-o: "\f123";
+$fa-var-star-o: "\f006";
+$fa-var-steam: "\f1b6";
+$fa-var-steam-square: "\f1b7";
+$fa-var-step-backward: "\f048";
+$fa-var-step-forward: "\f051";
+$fa-var-stethoscope: "\f0f1";
+$fa-var-sticky-note: "\f249";
+$fa-var-sticky-note-o: "\f24a";
+$fa-var-stop: "\f04d";
+$fa-var-stop-circle: "\f28d";
+$fa-var-stop-circle-o: "\f28e";
+$fa-var-street-view: "\f21d";
+$fa-var-strikethrough: "\f0cc";
+$fa-var-stumbleupon: "\f1a4";
+$fa-var-stumbleupon-circle: "\f1a3";
+$fa-var-subscript: "\f12c";
+$fa-var-subway: "\f239";
+$fa-var-suitcase: "\f0f2";
+$fa-var-sun-o: "\f185";
+$fa-var-superscript: "\f12b";
+$fa-var-support: "\f1cd";
+$fa-var-table: "\f0ce";
+$fa-var-tablet: "\f10a";
+$fa-var-tachometer: "\f0e4";
+$fa-var-tag: "\f02b";
+$fa-var-tags: "\f02c";
+$fa-var-tasks: "\f0ae";
+$fa-var-taxi: "\f1ba";
+$fa-var-television: "\f26c";
+$fa-var-tencent-weibo: "\f1d5";
+$fa-var-terminal: "\f120";
+$fa-var-text-height: "\f034";
+$fa-var-text-width: "\f035";
+$fa-var-th: "\f00a";
+$fa-var-th-large: "\f009";
+$fa-var-th-list: "\f00b";
+$fa-var-thumb-tack: "\f08d";
+$fa-var-thumbs-down: "\f165";
+$fa-var-thumbs-o-down: "\f088";
+$fa-var-thumbs-o-up: "\f087";
+$fa-var-thumbs-up: "\f164";
+$fa-var-ticket: "\f145";
+$fa-var-times: "\f00d";
+$fa-var-times-circle: "\f057";
+$fa-var-times-circle-o: "\f05c";
+$fa-var-tint: "\f043";
+$fa-var-toggle-down: "\f150";
+$fa-var-toggle-left: "\f191";
+$fa-var-toggle-off: "\f204";
+$fa-var-toggle-on: "\f205";
+$fa-var-toggle-right: "\f152";
+$fa-var-toggle-up: "\f151";
+$fa-var-trademark: "\f25c";
+$fa-var-train: "\f238";
+$fa-var-transgender: "\f224";
+$fa-var-transgender-alt: "\f225";
+$fa-var-trash: "\f1f8";
+$fa-var-trash-o: "\f014";
+$fa-var-tree: "\f1bb";
+$fa-var-trello: "\f181";
+$fa-var-tripadvisor: "\f262";
+$fa-var-trophy: "\f091";
+$fa-var-truck: "\f0d1";
+$fa-var-try: "\f195";
+$fa-var-tty: "\f1e4";
+$fa-var-tumblr: "\f173";
+$fa-var-tumblr-square: "\f174";
+$fa-var-turkish-lira: "\f195";
+$fa-var-tv: "\f26c";
+$fa-var-twitch: "\f1e8";
+$fa-var-twitter: "\f099";
+$fa-var-twitter-square: "\f081";
+$fa-var-umbrella: "\f0e9";
+$fa-var-underline: "\f0cd";
+$fa-var-undo: "\f0e2";
+$fa-var-university: "\f19c";
+$fa-var-unlink: "\f127";
+$fa-var-unlock: "\f09c";
+$fa-var-unlock-alt: "\f13e";
+$fa-var-unsorted: "\f0dc";
+$fa-var-upload: "\f093";
+$fa-var-usb: "\f287";
+$fa-var-usd: "\f155";
+$fa-var-user: "\f007";
+$fa-var-user-md: "\f0f0";
+$fa-var-user-plus: "\f234";
+$fa-var-user-secret: "\f21b";
+$fa-var-user-times: "\f235";
+$fa-var-users: "\f0c0";
+$fa-var-venus: "\f221";
+$fa-var-venus-double: "\f226";
+$fa-var-venus-mars: "\f228";
+$fa-var-viacoin: "\f237";
+$fa-var-video-camera: "\f03d";
+$fa-var-vimeo: "\f27d";
+$fa-var-vimeo-square: "\f194";
+$fa-var-vine: "\f1ca";
+$fa-var-vk: "\f189";
+$fa-var-volume-down: "\f027";
+$fa-var-volume-off: "\f026";
+$fa-var-volume-up: "\f028";
+$fa-var-warning: "\f071";
+$fa-var-wechat: "\f1d7";
+$fa-var-weibo: "\f18a";
+$fa-var-weixin: "\f1d7";
+$fa-var-whatsapp: "\f232";
+$fa-var-wheelchair: "\f193";
+$fa-var-wifi: "\f1eb";
+$fa-var-wikipedia-w: "\f266";
+$fa-var-windows: "\f17a";
+$fa-var-won: "\f159";
+$fa-var-wordpress: "\f19a";
+$fa-var-wrench: "\f0ad";
+$fa-var-xing: "\f168";
+$fa-var-xing-square: "\f169";
+$fa-var-y-combinator: "\f23b";
+$fa-var-y-combinator-square: "\f1d4";
+$fa-var-yahoo: "\f19e";
+$fa-var-yc: "\f23b";
+$fa-var-yc-square: "\f1d4";
+$fa-var-yelp: "\f1e9";
+$fa-var-yen: "\f157";
+$fa-var-youtube: "\f167";
+$fa-var-youtube-play: "\f16a";
+$fa-var-youtube-square: "\f166";
+
diff --git a/asset/static/fonts/scss/font-awesome.scss b/asset/static/fonts/scss/font-awesome.scss
new file mode 100755
index 0000000..f4668a5
--- /dev/null
+++ b/asset/static/fonts/scss/font-awesome.scss
@@ -0,0 +1,17 @@
+/*!
+ * Font Awesome 4.5.0 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+
+@import "variables";
+@import "mixins";
+@import "path";
+@import "core";
+@import "larger";
+@import "fixed-width";
+@import "list";
+@import "bordered-pulled";
+@import "animated";
+@import "rotated-flipped";
+@import "stacked";
+@import "icons";
diff --git a/asset/static/images/1.png b/asset/static/images/1.png
new file mode 100755
index 0000000..0eaf2b7
Binary files /dev/null and b/asset/static/images/1.png differ
diff --git a/asset/static/images/4.jpg b/asset/static/images/4.jpg
new file mode 100755
index 0000000..1b426c2
Binary files /dev/null and b/asset/static/images/4.jpg differ
diff --git a/asset/static/images/Sorting icons.psd b/asset/static/images/Sorting icons.psd
new file mode 100755
index 0000000..53b2e06
Binary files /dev/null and b/asset/static/images/Sorting icons.psd differ
diff --git a/asset/static/images/american-express.png b/asset/static/images/american-express.png
new file mode 100755
index 0000000..fbe9ce2
Binary files /dev/null and b/asset/static/images/american-express.png differ
diff --git a/asset/static/images/back_disabled.png b/asset/static/images/back_disabled.png
new file mode 100755
index 0000000..881de79
Binary files /dev/null and b/asset/static/images/back_disabled.png differ
diff --git a/asset/static/images/back_enabled.png b/asset/static/images/back_enabled.png
new file mode 100755
index 0000000..c608682
Binary files /dev/null and b/asset/static/images/back_enabled.png differ
diff --git a/asset/static/images/back_enabled_hover.png b/asset/static/images/back_enabled_hover.png
new file mode 100755
index 0000000..d300f10
Binary files /dev/null and b/asset/static/images/back_enabled_hover.png differ
diff --git a/asset/static/images/bootstrap-colorpicker/alpha-horizontal.png b/asset/static/images/bootstrap-colorpicker/alpha-horizontal.png
new file mode 100755
index 0000000..d0a65c0
Binary files /dev/null and b/asset/static/images/bootstrap-colorpicker/alpha-horizontal.png differ
diff --git a/asset/static/images/bootstrap-colorpicker/alpha.png b/asset/static/images/bootstrap-colorpicker/alpha.png
new file mode 100755
index 0000000..38043f1
Binary files /dev/null and b/asset/static/images/bootstrap-colorpicker/alpha.png differ
diff --git a/asset/static/images/bootstrap-colorpicker/hue-horizontal.png b/asset/static/images/bootstrap-colorpicker/hue-horizontal.png
new file mode 100755
index 0000000..a0d9add
Binary files /dev/null and b/asset/static/images/bootstrap-colorpicker/hue-horizontal.png differ
diff --git a/asset/static/images/bootstrap-colorpicker/hue.png b/asset/static/images/bootstrap-colorpicker/hue.png
new file mode 100755
index 0000000..d89560e
Binary files /dev/null and b/asset/static/images/bootstrap-colorpicker/hue.png differ
diff --git a/asset/static/images/bootstrap-colorpicker/saturation.png b/asset/static/images/bootstrap-colorpicker/saturation.png
new file mode 100755
index 0000000..594ae50
Binary files /dev/null and b/asset/static/images/bootstrap-colorpicker/saturation.png differ
diff --git a/asset/static/images/data.png b/asset/static/images/data.png
new file mode 100755
index 0000000..669983e
Binary files /dev/null and b/asset/static/images/data.png differ
diff --git a/asset/static/images/favicon.ico b/asset/static/images/favicon.ico
new file mode 100755
index 0000000..6eeaa2a
Binary files /dev/null and b/asset/static/images/favicon.ico differ
diff --git a/asset/static/images/forward_disabled.png b/asset/static/images/forward_disabled.png
new file mode 100755
index 0000000..6a6ded7
Binary files /dev/null and b/asset/static/images/forward_disabled.png differ
diff --git a/asset/static/images/forward_enabled.png b/asset/static/images/forward_enabled.png
new file mode 100755
index 0000000..a4e6b53
Binary files /dev/null and b/asset/static/images/forward_enabled.png differ
diff --git a/asset/static/images/forward_enabled_hover.png b/asset/static/images/forward_enabled_hover.png
new file mode 100755
index 0000000..fc46c5e
Binary files /dev/null and b/asset/static/images/forward_enabled_hover.png differ
diff --git a/asset/static/images/icons.png b/asset/static/images/icons.png
new file mode 100755
index 0000000..903afb4
Binary files /dev/null and b/asset/static/images/icons.png differ
diff --git a/asset/static/images/img.jpg b/asset/static/images/img.jpg
new file mode 100755
index 0000000..54f619e
Binary files /dev/null and b/asset/static/images/img.jpg differ
diff --git a/asset/static/images/loading.gif b/asset/static/images/loading.gif
new file mode 100755
index 0000000..2e4f98c
Binary files /dev/null and b/asset/static/images/loading.gif differ
diff --git a/asset/static/images/mastercard.png b/asset/static/images/mastercard.png
new file mode 100755
index 0000000..f709adb
Binary files /dev/null and b/asset/static/images/mastercard.png differ
diff --git a/asset/static/images/paypal2.png b/asset/static/images/paypal2.png
new file mode 100755
index 0000000..b0ca241
Binary files /dev/null and b/asset/static/images/paypal2.png differ
diff --git a/asset/static/images/picture-2.jpg b/asset/static/images/picture-2.jpg
new file mode 100755
index 0000000..9d4df0f
Binary files /dev/null and b/asset/static/images/picture-2.jpg differ
diff --git a/asset/static/images/picture.jpg b/asset/static/images/picture.jpg
new file mode 100755
index 0000000..596f01d
Binary files /dev/null and b/asset/static/images/picture.jpg differ
diff --git a/asset/static/images/picture2.jpg b/asset/static/images/picture2.jpg
new file mode 100755
index 0000000..e4e7c48
Binary files /dev/null and b/asset/static/images/picture2.jpg differ
diff --git a/asset/static/images/prod1.jpg b/asset/static/images/prod1.jpg
new file mode 100755
index 0000000..861c5f3
Binary files /dev/null and b/asset/static/images/prod1.jpg differ
diff --git a/asset/static/images/prod2.jpg b/asset/static/images/prod2.jpg
new file mode 100755
index 0000000..eae5a6e
Binary files /dev/null and b/asset/static/images/prod2.jpg differ
diff --git a/asset/static/images/prod3.jpg b/asset/static/images/prod3.jpg
new file mode 100755
index 0000000..f3d2332
Binary files /dev/null and b/asset/static/images/prod3.jpg differ
diff --git a/asset/static/images/prod4.jpg b/asset/static/images/prod4.jpg
new file mode 100755
index 0000000..35caa2e
Binary files /dev/null and b/asset/static/images/prod4.jpg differ
diff --git a/asset/static/images/prod5.jpg b/asset/static/images/prod5.jpg
new file mode 100755
index 0000000..c03d7b6
Binary files /dev/null and b/asset/static/images/prod5.jpg differ
diff --git a/asset/static/images/sort_asc.png b/asset/static/images/sort_asc.png
new file mode 100755
index 0000000..a88d797
Binary files /dev/null and b/asset/static/images/sort_asc.png differ
diff --git a/asset/static/images/sort_asc_disabled.png b/asset/static/images/sort_asc_disabled.png
new file mode 100755
index 0000000..4e144cf
Binary files /dev/null and b/asset/static/images/sort_asc_disabled.png differ
diff --git a/asset/static/images/sort_both.png b/asset/static/images/sort_both.png
new file mode 100755
index 0000000..1867040
Binary files /dev/null and b/asset/static/images/sort_both.png differ
diff --git a/asset/static/images/sort_desc.png b/asset/static/images/sort_desc.png
new file mode 100755
index 0000000..def071e
Binary files /dev/null and b/asset/static/images/sort_desc.png differ
diff --git a/asset/static/images/sort_desc_disabled.png b/asset/static/images/sort_desc_disabled.png
new file mode 100755
index 0000000..7824973
Binary files /dev/null and b/asset/static/images/sort_desc_disabled.png differ
diff --git a/asset/static/images/sprite-skin-flat.png b/asset/static/images/sprite-skin-flat.png
new file mode 100755
index 0000000..0f11c7c
Binary files /dev/null and b/asset/static/images/sprite-skin-flat.png differ
diff --git a/asset/static/images/sprite-skin-modern.png b/asset/static/images/sprite-skin-modern.png
new file mode 100755
index 0000000..c9060f2
Binary files /dev/null and b/asset/static/images/sprite-skin-modern.png differ
diff --git a/asset/static/images/sprite-skin-nice.png b/asset/static/images/sprite-skin-nice.png
new file mode 100755
index 0000000..9b0a4bc
Binary files /dev/null and b/asset/static/images/sprite-skin-nice.png differ
diff --git a/asset/static/images/sprite-skin-simple.png b/asset/static/images/sprite-skin-simple.png
new file mode 100755
index 0000000..0dc5e68
Binary files /dev/null and b/asset/static/images/sprite-skin-simple.png differ
diff --git a/asset/static/images/user.png b/asset/static/images/user.png
new file mode 100755
index 0000000..c0b9594
Binary files /dev/null and b/asset/static/images/user.png differ
diff --git a/asset/static/images/visa.png b/asset/static/images/visa.png
new file mode 100755
index 0000000..7099cdf
Binary files /dev/null and b/asset/static/images/visa.png differ
diff --git a/asset/static/js/autocomplete/countries.js b/asset/static/js/autocomplete/countries.js
new file mode 100755
index 0000000..7d41ee4
--- /dev/null
+++ b/asset/static/js/autocomplete/countries.js
@@ -0,0 +1,267 @@
+var countries = {
+ "AD": "Andorra",
+ "A2": "Andorra Test",
+ "AE": "United Arab Emirates",
+ "AF": "Afghanistan",
+ "AG": "Antigua and Barbuda",
+ "AI": "Anguilla",
+ "AL": "Albania",
+ "AM": "Armenia",
+ "AN": "Netherlands Antilles",
+ "AO": "Angola",
+ "AQ": "Antarctica",
+ "AR": "Argentina",
+ "AS": "American Samoa",
+ "AT": "Austria",
+ "AU": "Australia",
+ "AW": "Aruba",
+ "AX": "\u00c5land Islands",
+ "AZ": "Azerbaijan",
+ "BA": "Bosnia and Herzegovina",
+ "BB": "Barbados",
+ "BD": "Bangladesh",
+ "BE": "Belgium",
+ "BF": "Burkina Faso",
+ "BG": "Bulgaria",
+ "BH": "Bahrain",
+ "BI": "Burundi",
+ "BJ": "Benin",
+ "BL": "Saint Barth\u00e9lemy",
+ "BM": "Bermuda",
+ "BN": "Brunei",
+ "BO": "Bolivia",
+ "BQ": "British Antarctic Territory",
+ "BR": "Brazil",
+ "BS": "Bahamas",
+ "BT": "Bhutan",
+ "BV": "Bouvet Island",
+ "BW": "Botswana",
+ "BY": "Belarus",
+ "BZ": "Belize",
+ "CA": "Canada",
+ "CC": "Cocos [Keeling] Islands",
+ "CD": "Congo - Kinshasa",
+ "CF": "Central African Republic",
+ "CG": "Congo - Brazzaville",
+ "CH": "Switzerland",
+ "CI": "C\u00f4te d\u2019Ivoire",
+ "CK": "Cook Islands",
+ "CL": "Chile",
+ "CM": "Cameroon",
+ "CN": "China",
+ "CO": "Colombia",
+ "CR": "Costa Rica",
+ "CS": "Serbia and Montenegro",
+ "CT": "Canton and Enderbury Islands",
+ "CU": "Cuba",
+ "CV": "Cape Verde",
+ "CX": "Christmas Island",
+ "CY": "Cyprus",
+ "CZ": "Czech Republic",
+ "DD": "East Germany",
+ "DE": "Germany",
+ "DJ": "Djibouti",
+ "DK": "Denmark",
+ "DM": "Dominica",
+ "DO": "Dominican Republic",
+ "DZ": "Algeria",
+ "EC": "Ecuador",
+ "EE": "Estonia",
+ "EG": "Egypt",
+ "EH": "Western Sahara",
+ "ER": "Eritrea",
+ "ES": "Spain",
+ "ET": "Ethiopia",
+ "FI": "Finland",
+ "FJ": "Fiji",
+ "FK": "Falkland Islands",
+ "FM": "Micronesia",
+ "FO": "Faroe Islands",
+ "FQ": "French Southern and Antarctic Territories",
+ "FR": "France",
+ "FX": "Metropolitan France",
+ "GA": "Gabon",
+ "GB": "United Kingdom",
+ "GD": "Grenada",
+ "GE": "Georgia",
+ "GF": "French Guiana",
+ "GG": "Guernsey",
+ "GH": "Ghana",
+ "GI": "Gibraltar",
+ "GL": "Greenland",
+ "GM": "Gambia",
+ "GN": "Guinea",
+ "GP": "Guadeloupe",
+ "GQ": "Equatorial Guinea",
+ "GR": "Greece",
+ "GS": "South Georgia and the South Sandwich Islands",
+ "GT": "Guatemala",
+ "GU": "Guam",
+ "GW": "Guinea-Bissau",
+ "GY": "Guyana",
+ "HK": "Hong Kong SAR China",
+ "HM": "Heard Island and McDonald Islands",
+ "HN": "Honduras",
+ "HR": "Croatia",
+ "HT": "Haiti",
+ "HU": "Hungary",
+ "ID": "Indonesia",
+ "IE": "Ireland",
+ "IL": "Israel",
+ "IM": "Isle of Man",
+ "IN": "India",
+ "IO": "British Indian Ocean Territory",
+ "IQ": "Iraq",
+ "IR": "Iran",
+ "IS": "Iceland",
+ "IT": "Italy",
+ "JE": "Jersey",
+ "JM": "Jamaica",
+ "JO": "Jordan",
+ "JP": "Japan",
+ "JT": "Johnston Island",
+ "KE": "Kenya",
+ "KG": "Kyrgyzstan",
+ "KH": "Cambodia",
+ "KI": "Kiribati",
+ "KM": "Comoros",
+ "KN": "Saint Kitts and Nevis",
+ "KP": "North Korea",
+ "KR": "South Korea",
+ "KW": "Kuwait",
+ "KY": "Cayman Islands",
+ "KZ": "Kazakhstan",
+ "LA": "Laos",
+ "LB": "Lebanon",
+ "LC": "Saint Lucia",
+ "LI": "Liechtenstein",
+ "LK": "Sri Lanka",
+ "LR": "Liberia",
+ "LS": "Lesotho",
+ "LT": "Lithuania",
+ "LU": "Luxembourg",
+ "LV": "Latvia",
+ "LY": "Libya",
+ "MA": "Morocco",
+ "MC": "Monaco",
+ "MD": "Moldova",
+ "ME": "Montenegro",
+ "MF": "Saint Martin",
+ "MG": "Madagascar",
+ "MH": "Marshall Islands",
+ "MI": "Midway Islands",
+ "MK": "Macedonia",
+ "ML": "Mali",
+ "MM": "Myanmar [Burma]",
+ "MN": "Mongolia",
+ "MO": "Macau SAR China",
+ "MP": "Northern Mariana Islands",
+ "MQ": "Martinique",
+ "MR": "Mauritania",
+ "MS": "Montserrat",
+ "MT": "Malta",
+ "MU": "Mauritius",
+ "MV": "Maldives",
+ "MW": "Malawi",
+ "MX": "Mexico",
+ "MY": "Malaysia",
+ "MZ": "Mozambique",
+ "NA": "Namibia",
+ "NC": "New Caledonia",
+ "NE": "Niger",
+ "NF": "Norfolk Island",
+ "NG": "Nigeria",
+ "NI": "Nicaragua",
+ "NL": "Netherlands",
+ "NO": "Norway",
+ "NP": "Nepal",
+ "NQ": "Dronning Maud Land",
+ "NR": "Nauru",
+ "NT": "Neutral Zone",
+ "NU": "Niue",
+ "NZ": "New Zealand",
+ "OM": "Oman",
+ "PA": "Panama",
+ "PC": "Pacific Islands Trust Territory",
+ "PE": "Peru",
+ "PF": "French Polynesia",
+ "PG": "Papua New Guinea",
+ "PH": "Philippines",
+ "PK": "Pakistan",
+ "PL": "Poland",
+ "PM": "Saint Pierre and Miquelon",
+ "PN": "Pitcairn Islands",
+ "PR": "Puerto Rico",
+ "PS": "Palestinian Territories",
+ "PT": "Portugal",
+ "PU": "U.S. Miscellaneous Pacific Islands",
+ "PW": "Palau",
+ "PY": "Paraguay",
+ "PZ": "Panama Canal Zone",
+ "QA": "Qatar",
+ "RE": "R\u00e9union",
+ "RO": "Romania",
+ "RS": "Serbia",
+ "RU": "Russia",
+ "RW": "Rwanda",
+ "SA": "Saudi Arabia",
+ "SB": "Solomon Islands",
+ "SC": "Seychelles",
+ "SD": "Sudan",
+ "SE": "Sweden",
+ "SG": "Singapore",
+ "SH": "Saint Helena",
+ "SI": "Slovenia",
+ "SJ": "Svalbard and Jan Mayen",
+ "SK": "Slovakia",
+ "SL": "Sierra Leone",
+ "SM": "San Marino",
+ "SN": "Senegal",
+ "SO": "Somalia",
+ "SR": "Suriname",
+ "ST": "S\u00e3o Tom\u00e9 and Pr\u00edncipe",
+ "SU": "Union of Soviet Socialist Republics",
+ "SV": "El Salvador",
+ "SY": "Syria",
+ "SZ": "Swaziland",
+ "TC": "Turks and Caicos Islands",
+ "TD": "Chad",
+ "TF": "French Southern Territories",
+ "TG": "Togo",
+ "TH": "Thailand",
+ "TJ": "Tajikistan",
+ "TK": "Tokelau",
+ "TL": "Timor-Leste",
+ "TM": "Turkmenistan",
+ "TN": "Tunisia",
+ "TO": "Tonga",
+ "TR": "Turkey",
+ "TT": "Trinidad and Tobago",
+ "TV": "Tuvalu",
+ "TW": "Taiwan",
+ "TZ": "Tanzania",
+ "UA": "Ukraine",
+ "UG": "Uganda",
+ "UM": "U.S. Minor Outlying Islands",
+ "US": "United States",
+ "UY": "Uruguay",
+ "UZ": "Uzbekistan",
+ "VA": "Vatican City",
+ "VC": "Saint Vincent and the Grenadines",
+ "VD": "North Vietnam",
+ "VE": "Venezuela",
+ "VG": "British Virgin Islands",
+ "VI": "U.S. Virgin Islands",
+ "VN": "Vietnam",
+ "VU": "Vanuatu",
+ "WF": "Wallis and Futuna",
+ "WK": "Wake Island",
+ "WS": "Samoa",
+ "YD": "People's Democratic Republic of Yemen",
+ "YE": "Yemen",
+ "YT": "Mayotte",
+ "ZA": "South Africa",
+ "ZM": "Zambia",
+ "ZW": "Zimbabwe",
+ "ZZ": "Unknown or Invalid Region"
+}
\ No newline at end of file
diff --git a/asset/static/js/autocomplete/jquery.autocomplete.js b/asset/static/js/autocomplete/jquery.autocomplete.js
new file mode 100755
index 0000000..81ee81d
--- /dev/null
+++ b/asset/static/js/autocomplete/jquery.autocomplete.js
@@ -0,0 +1,986 @@
+/**
+* Ajax Autocomplete for jQuery, version 1.2.27
+* (c) 2015 Tomas Kirda
+*
+* Ajax Autocomplete for jQuery is freely distributable under the terms of an MIT-style license.
+* For details, see the web site: https://github.com/devbridge/jQuery-Autocomplete
+*/
+
+/*jslint browser: true, white: true, plusplus: true, vars: true */
+/*global define, window, document, jQuery, exports, require */
+
+// Expose plugin as an AMD module if AMD loader is present:
+(function (factory) {
+ 'use strict';
+ if (typeof define === 'function' && define.amd) {
+ // AMD. Register as an anonymous module.
+ define(['jquery'], factory);
+ } else if (typeof exports === 'object' && typeof require === 'function') {
+ // Browserify
+ factory(require('jquery'));
+ } else {
+ // Browser globals
+ factory(jQuery);
+ }
+}(function ($) {
+ 'use strict';
+
+ var
+ utils = (function () {
+ return {
+ escapeRegExChars: function (value) {
+ return value.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g, "\\$&");
+ },
+ createNode: function (containerClass) {
+ var div = document.createElement('div');
+ div.className = containerClass;
+ div.style.position = 'absolute';
+ div.style.display = 'none';
+ return div;
+ }
+ };
+ }()),
+
+ keys = {
+ ESC: 27,
+ TAB: 9,
+ RETURN: 13,
+ LEFT: 37,
+ UP: 38,
+ RIGHT: 39,
+ DOWN: 40
+ };
+
+ function Autocomplete(el, options) {
+ var noop = function () { },
+ that = this,
+ defaults = {
+ ajaxSettings: {},
+ autoSelectFirst: false,
+ appendTo: document.body,
+ serviceUrl: null,
+ lookup: null,
+ onSelect: null,
+ width: 'auto',
+ minChars: 1,
+ maxHeight: 300,
+ deferRequestBy: 0,
+ params: {},
+ formatResult: Autocomplete.formatResult,
+ delimiter: null,
+ zIndex: 9999,
+ type: 'GET',
+ noCache: false,
+ onSearchStart: noop,
+ onSearchComplete: noop,
+ onSearchError: noop,
+ preserveInput: false,
+ containerClass: 'autocomplete-suggestions',
+ tabDisabled: false,
+ dataType: 'text',
+ currentRequest: null,
+ triggerSelectOnValidInput: true,
+ preventBadQueries: true,
+ lookupFilter: function (suggestion, originalQuery, queryLowerCase) {
+ return suggestion.value.toLowerCase().indexOf(queryLowerCase) !== -1;
+ },
+ paramName: 'query',
+ transformResult: function (response) {
+ return typeof response === 'string' ? $.parseJSON(response) : response;
+ },
+ showNoSuggestionNotice: false,
+ noSuggestionNotice: 'No results',
+ orientation: 'bottom',
+ forceFixPosition: false
+ };
+
+ // Shared variables:
+ that.element = el;
+ that.el = $(el);
+ that.suggestions = [];
+ that.badQueries = [];
+ that.selectedIndex = -1;
+ that.currentValue = that.element.value;
+ that.intervalId = 0;
+ that.cachedResponse = {};
+ that.onChangeInterval = null;
+ that.onChange = null;
+ that.isLocal = false;
+ that.suggestionsContainer = null;
+ that.noSuggestionsContainer = null;
+ that.options = $.extend({}, defaults, options);
+ that.classes = {
+ selected: 'autocomplete-selected',
+ suggestion: 'autocomplete-suggestion'
+ };
+ that.hint = null;
+ that.hintValue = '';
+ that.selection = null;
+
+ // Initialize and set options:
+ that.initialize();
+ that.setOptions(options);
+ }
+
+ Autocomplete.utils = utils;
+
+ $.Autocomplete = Autocomplete;
+
+ Autocomplete.formatResult = function (suggestion, currentValue) {
+ // Do not replace anything if the current value is empty
+ if (!currentValue) {
+ return suggestion.value;
+ }
+
+ var pattern = '(' + utils.escapeRegExChars(currentValue) + ')';
+
+ return suggestion.value
+ .replace(new RegExp(pattern, 'gi'), '<strong>$1<\/strong>')
+ .replace(/&/g, '&amp;')
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/"/g, '&quot;')
+ .replace(/&lt;(\/?strong)&gt;/g, '<$1>');
+ };
+
+ Autocomplete.prototype = {
+
+ killerFn: null,
+
+ initialize: function () {
+ var that = this,
+ suggestionSelector = '.' + that.classes.suggestion,
+ selected = that.classes.selected,
+ options = that.options,
+ container;
+
+ // Remove autocomplete attribute to prevent native suggestions:
+ that.element.setAttribute('autocomplete', 'off');
+
+ that.killerFn = function (e) {
+ if ($(e.target).closest('.' + that.options.containerClass).length === 0) {
+ that.killSuggestions();
+ that.disableKillerFn();
+ }
+ };
+
+ // html() deals with many types: htmlString or Element or Array or jQuery
+ that.noSuggestionsContainer = $('<div class="autocomplete-no-suggestion"></div>')
+ .html(this.options.noSuggestionNotice).get(0);
+
+ that.suggestionsContainer = Autocomplete.utils.createNode(options.containerClass);
+
+ container = $(that.suggestionsContainer);
+
+ container.appendTo(options.appendTo);
+
+ // Only set width if it was provided:
+ if (options.width !== 'auto') {
+ container.width(options.width);
+ }
+
+ // Listen for mouse over event on suggestions list:
+ container.on('mouseover.autocomplete', suggestionSelector, function () {
+ that.activate($(this).data('index'));
+ });
+
+ // Deselect active element when mouse leaves suggestions container:
+ container.on('mouseout.autocomplete', function () {
+ that.selectedIndex = -1;
+ container.children('.' + selected).removeClass(selected);
+ });
+
+ // Listen for click event on suggestions list:
+ container.on('click.autocomplete', suggestionSelector, function () {
+ that.select($(this).data('index'));
+ });
+
+ that.fixPositionCapture = function () {
+ if (that.visible) {
+ that.fixPosition();
+ }
+ };
+
+ $(window).on('resize.autocomplete', that.fixPositionCapture);
+
+ that.el.on('keydown.autocomplete', function (e) { that.onKeyPress(e); });
+ that.el.on('keyup.autocomplete', function (e) { that.onKeyUp(e); });
+ that.el.on('blur.autocomplete', function () { that.onBlur(); });
+ that.el.on('focus.autocomplete', function () { that.onFocus(); });
+ that.el.on('change.autocomplete', function (e) { that.onKeyUp(e); });
+ that.el.on('input.autocomplete', function (e) { that.onKeyUp(e); });
+ },
+
+ onFocus: function () {
+ var that = this;
+
+ that.fixPosition();
+
+ if (that.el.val().length >= that.options.minChars) {
+ that.onValueChange();
+ }
+ },
+
+ onBlur: function () {
+ this.enableKillerFn();
+ },
+
+ abortAjax: function () {
+ var that = this;
+ if (that.currentRequest) {
+ that.currentRequest.abort();
+ that.currentRequest = null;
+ }
+ },
+
+ setOptions: function (suppliedOptions) {
+ var that = this,
+ options = that.options;
+
+ $.extend(options, suppliedOptions);
+
+ that.isLocal = $.isArray(options.lookup);
+
+ if (that.isLocal) {
+ options.lookup = that.verifySuggestionsFormat(options.lookup);
+ }
+
+ options.orientation = that.validateOrientation(options.orientation, 'bottom');
+
+ // Adjust height, width and z-index:
+ $(that.suggestionsContainer).css({
+ 'max-height': options.maxHeight + 'px',
+ 'width': options.width + 'px',
+ 'z-index': options.zIndex
+ });
+ },
+
+
+ clearCache: function () {
+ this.cachedResponse = {};
+ this.badQueries = [];
+ },
+
+ clear: function () {
+ this.clearCache();
+ this.currentValue = '';
+ this.suggestions = [];
+ },
+
+ disable: function () {
+ var that = this;
+ that.disabled = true;
+ clearInterval(that.onChangeInterval);
+ that.abortAjax();
+ },
+
+ enable: function () {
+ this.disabled = false;
+ },
+
+ fixPosition: function () {
+ // Use only when container has already its content
+
+ var that = this,
+ $container = $(that.suggestionsContainer),
+ containerParent = $container.parent().get(0);
+ // Fix position automatically when appended to body.
+ // In other cases force parameter must be given.
+ if (containerParent !== document.body && !that.options.forceFixPosition) {
+ return;
+ }
+
+ // Choose orientation
+ var orientation = that.options.orientation,
+ containerHeight = $container.outerHeight(),
+ height = that.el.outerHeight(),
+ offset = that.el.offset(),
+ styles = { 'top': offset.top, 'left': offset.left };
+
+ if (orientation === 'auto') {
+ var viewPortHeight = $(window).height(),
+ scrollTop = $(window).scrollTop(),
+ topOverflow = -scrollTop + offset.top - containerHeight,
+ bottomOverflow = scrollTop + viewPortHeight - (offset.top + height + containerHeight);
+
+ orientation = (Math.max(topOverflow, bottomOverflow) === topOverflow) ? 'top' : 'bottom';
+ }
+
+ if (orientation === 'top') {
+ styles.top += -containerHeight;
+ } else {
+ styles.top += height;
+ }
+
+ // If container is not positioned to body,
+ // correct its position using offset parent offset
+ if(containerParent !== document.body) {
+ var opacity = $container.css('opacity'),
+ parentOffsetDiff;
+
+ if (!that.visible){
+ $container.css('opacity', 0).show();
+ }
+
+ parentOffsetDiff = $container.offsetParent().offset();
+ styles.top -= parentOffsetDiff.top;
+ styles.left -= parentOffsetDiff.left;
+
+ if (!that.visible){
+ $container.css('opacity', opacity).hide();
+ }
+ }
+
+ // -2px to account for suggestions border.
+ if (that.options.width === 'auto') {
+ styles.width = (that.el.outerWidth() - 2) + 'px';
+ }
+
+ $container.css(styles);
+ },
+
+ enableKillerFn: function () {
+ var that = this;
+ $(document).on('click.autocomplete', that.killerFn);
+ },
+
+ disableKillerFn: function () {
+ var that = this;
+ $(document).off('click.autocomplete', that.killerFn);
+ },
+
+ killSuggestions: function () {
+ var that = this;
+ that.stopKillSuggestions();
+ that.intervalId = window.setInterval(function () {
+ if (that.visible) {
+ that.el.val(that.currentValue);
+ that.hide();
+ }
+
+ that.stopKillSuggestions();
+ }, 50);
+ },
+
+ stopKillSuggestions: function () {
+ window.clearInterval(this.intervalId);
+ },
+
+ isCursorAtEnd: function () {
+ var that = this,
+ valLength = that.el.val().length,
+ selectionStart = that.element.selectionStart,
+ range;
+
+ if (typeof selectionStart === 'number') {
+ return selectionStart === valLength;
+ }
+ if (document.selection) {
+ range = document.selection.createRange();
+ range.moveStart('character', -valLength);
+ return valLength === range.text.length;
+ }
+ return true;
+ },
+
+ onKeyPress: function (e) {
+ var that = this;
+
+ // If suggestions are hidden and user presses arrow down, display suggestions:
+ if (!that.disabled && !that.visible && e.which === keys.DOWN && that.currentValue) {
+ that.suggest();
+ return;
+ }
+
+ if (that.disabled || !that.visible) {
+ return;
+ }
+
+ switch (e.which) {
+ case keys.ESC:
+ that.el.val(that.currentValue);
+ that.hide();
+ break;
+ case keys.RIGHT:
+ if (that.hint && that.options.onHint && that.isCursorAtEnd()) {
+ that.selectHint();
+ break;
+ }
+ return;
+ case keys.TAB:
+ if (that.hint && that.options.onHint) {
+ that.selectHint();
+ return;
+ }
+ if (that.selectedIndex === -1) {
+ that.hide();
+ return;
+ }
+ that.select(that.selectedIndex);
+ if (that.options.tabDisabled === false) {
+ return;
+ }
+ break;
+ case keys.RETURN:
+ if (that.selectedIndex === -1) {
+ that.hide();
+ return;
+ }
+ that.select(that.selectedIndex);
+ break;
+ case keys.UP:
+ that.moveUp();
+ break;
+ case keys.DOWN:
+ that.moveDown();
+ break;
+ default:
+ return;
+ }
+
+ // Cancel event if function did not return:
+ e.stopImmediatePropagation();
+ e.preventDefault();
+ },
+
+ onKeyUp: function (e) {
+ var that = this;
+
+ if (that.disabled) {
+ return;
+ }
+
+ switch (e.which) {
+ case keys.UP:
+ case keys.DOWN:
+ return;
+ }
+
+ clearInterval(that.onChangeInterval);
+
+ if (that.currentValue !== that.el.val()) {
+ that.findBestHint();
+ if (that.options.deferRequestBy > 0) {
+ // Defer lookup in case when value changes very quickly:
+ that.onChangeInterval = setInterval(function () {
+ that.onValueChange();
+ }, that.options.deferRequestBy);
+ } else {
+ that.onValueChange();
+ }
+ }
+ },
+
+ onValueChange: function () {
+ var that = this,
+ options = that.options,
+ value = that.el.val(),
+ query = that.getQuery(value);
+
+ if (that.selection && that.currentValue !== query) {
+ that.selection = null;
+ (options.onInvalidateSelection || $.noop).call(that.element);
+ }
+
+ clearInterval(that.onChangeInterval);
+ that.currentValue = value;
+ that.selectedIndex = -1;
+
+ // Check existing suggestion for the match before proceeding:
+ if (options.triggerSelectOnValidInput && that.isExactMatch(query)) {
+ that.select(0);
+ return;
+ }
+
+ if (query.length < options.minChars) {
+ that.hide();
+ } else {
+ that.getSuggestions(query);
+ }
+ },
+
+ isExactMatch: function (query) {
+ var suggestions = this.suggestions;
+
+ return (suggestions.length === 1 && suggestions[0].value.toLowerCase() === query.toLowerCase());
+ },
+
+ getQuery: function (value) {
+ var delimiter = this.options.delimiter,
+ parts;
+
+ if (!delimiter) {
+ return value;
+ }
+ parts = value.split(delimiter);
+ return $.trim(parts[parts.length - 1]);
+ },
+
+ getSuggestionsLocal: function (query) {
+ var that = this,
+ options = that.options,
+ queryLowerCase = query.toLowerCase(),
+ filter = options.lookupFilter,
+ limit = parseInt(options.lookupLimit, 10),
+ data;
+
+ data = {
+ suggestions: $.grep(options.lookup, function (suggestion) {
+ return filter(suggestion, query, queryLowerCase);
+ })
+ };
+
+ if (limit && data.suggestions.length > limit) {
+ data.suggestions = data.suggestions.slice(0, limit);
+ }
+
+ return data;
+ },
+
+ getSuggestions: function (q) {
+ var response,
+ that = this,
+ options = that.options,
+ serviceUrl = options.serviceUrl,
+ params,
+ cacheKey,
+ ajaxSettings;
+
+ options.params[options.paramName] = q;
+ params = options.ignoreParams ? null : options.params;
+
+ if (options.onSearchStart.call(that.element, options.params) === false) {
+ return;
+ }
+
+ if ($.isFunction(options.lookup)){
+ options.lookup(q, function (data) {
+ that.suggestions = data.suggestions;
+ that.suggest();
+ options.onSearchComplete.call(that.element, q, data.suggestions);
+ });
+ return;
+ }
+
+ if (that.isLocal) {
+ response = that.getSuggestionsLocal(q);
+ } else {
+ if ($.isFunction(serviceUrl)) {
+ serviceUrl = serviceUrl.call(that.element, q);
+ }
+ cacheKey = serviceUrl + '?' + $.param(params || {});
+ response = that.cachedResponse[cacheKey];
+ }
+
+ if (response && $.isArray(response.suggestions)) {
+ that.suggestions = response.suggestions;
+ that.suggest();
+ options.onSearchComplete.call(that.element, q, response.suggestions);
+ } else if (!that.isBadQuery(q)) {
+ that.abortAjax();
+
+ ajaxSettings = {
+ url: serviceUrl,
+ data: params,
+ type: options.type,
+ dataType: options.dataType
+ };
+
+ $.extend(ajaxSettings, options.ajaxSettings);
+
+ that.currentRequest = $.ajax(ajaxSettings).done(function (data) {
+ var result;
+ that.currentRequest = null;
+ result = options.transformResult(data, q);
+ that.processResponse(result, q, cacheKey);
+ options.onSearchComplete.call(that.element, q, result.suggestions);
+ }).fail(function (jqXHR, textStatus, errorThrown) {
+ options.onSearchError.call(that.element, q, jqXHR, textStatus, errorThrown);
+ });
+ } else {
+ options.onSearchComplete.call(that.element, q, []);
+ }
+ },
+
+ isBadQuery: function (q) {
+ if (!this.options.preventBadQueries){
+ return false;
+ }
+
+ var badQueries = this.badQueries,
+ i = badQueries.length;
+
+ while (i--) {
+ if (q.indexOf(badQueries[i]) === 0) {
+ return true;
+ }
+ }
+
+ return false;
+ },
+
+ hide: function () {
+ var that = this,
+ container = $(that.suggestionsContainer);
+
+ if ($.isFunction(that.options.onHide) && that.visible) {
+ that.options.onHide.call(that.element, container);
+ }
+
+ that.visible = false;
+ that.selectedIndex = -1;
+ clearInterval(that.onChangeInterval);
+ $(that.suggestionsContainer).hide();
+ that.signalHint(null);
+ },
+
+ suggest: function () {
+ if (this.suggestions.length === 0) {
+ if (this.options.showNoSuggestionNotice) {
+ this.noSuggestions();
+ } else {
+ this.hide();
+ }
+ return;
+ }
+
+ var that = this,
+ options = that.options,
+ groupBy = options.groupBy,
+ formatResult = options.formatResult,
+ value = that.getQuery(that.currentValue),
+ className = that.classes.suggestion,
+ classSelected = that.classes.selected,
+ container = $(that.suggestionsContainer),
+ noSuggestionsContainer = $(that.noSuggestionsContainer),
+ beforeRender = options.beforeRender,
+ html = '',
+ category,
+ formatGroup = function (suggestion, index) {
+ var currentCategory = suggestion.data[groupBy];
+
+ if (category === currentCategory){
+ return '';
+ }
+
+ category = currentCategory;
+
+ return '<div class="autocomplete-group"><strong>' + category + '</strong></div>';
+ };
+
+ if (options.triggerSelectOnValidInput && that.isExactMatch(value)) {
+ that.select(0);
+ return;
+ }
+
+ // Build suggestions inner HTML:
+ $.each(that.suggestions, function (i, suggestion) {
+ if (groupBy){
+ html += formatGroup(suggestion, value, i);
+ }
+
+ html += '<div class="' + className + '" data-index="' + i + '">' + formatResult(suggestion, value) + '</div>';
+ });
+
+ this.adjustContainerWidth();
+
+ noSuggestionsContainer.detach();
+ container.html(html);
+
+ if ($.isFunction(beforeRender)) {
+ beforeRender.call(that.element, container);
+ }
+
+ that.fixPosition();
+ container.show();
+
+ // Select first value by default:
+ if (options.autoSelectFirst) {
+ that.selectedIndex = 0;
+ container.scrollTop(0);
+ container.children('.' + className).first().addClass(classSelected);
+ }
+
+ that.visible = true;
+ that.findBestHint();
+ },
+
+ noSuggestions: function() {
+ var that = this,
+ container = $(that.suggestionsContainer),
+ noSuggestionsContainer = $(that.noSuggestionsContainer);
+
+ this.adjustContainerWidth();
+
+ // Some explicit steps. Be careful here as it easy to get
+ // noSuggestionsContainer removed from DOM if not detached properly.
+ noSuggestionsContainer.detach();
+ container.empty(); // clean suggestions if any
+ container.append(noSuggestionsContainer);
+
+ that.fixPosition();
+
+ container.show();
+ that.visible = true;
+ },
+
+ adjustContainerWidth: function() {
+ var that = this,
+ options = that.options,
+ width,
+ container = $(that.suggestionsContainer);
+
+ // If width is auto, adjust width before displaying suggestions,
+ // because if instance was created before input had width, it will be zero.
+ // Also it adjusts if input width has changed.
+ // -2px to account for suggestions border.
+ if (options.width === 'auto') {
+ width = that.el.outerWidth() - 2;
+ container.width(width > 0 ? width : 300);
+ }
+ },
+
+ findBestHint: function () {
+ var that = this,
+ value = that.el.val().toLowerCase(),
+ bestMatch = null;
+
+ if (!value) {
+ return;
+ }
+
+ $.each(that.suggestions, function (i, suggestion) {
+ var foundMatch = suggestion.value.toLowerCase().indexOf(value) === 0;
+ if (foundMatch) {
+ bestMatch = suggestion;
+ }
+ return !foundMatch;
+ });
+
+ that.signalHint(bestMatch);
+ },
+
+ signalHint: function (suggestion) {
+ var hintValue = '',
+ that = this;
+ if (suggestion) {
+ hintValue = that.currentValue + suggestion.value.substr(that.currentValue.length);
+ }
+ if (that.hintValue !== hintValue) {
+ that.hintValue = hintValue;
+ that.hint = suggestion;
+ (this.options.onHint || $.noop)(hintValue);
+ }
+ },
+
+ verifySuggestionsFormat: function (suggestions) {
+ // If suggestions is string array, convert them to supported format:
+ if (suggestions.length && typeof suggestions[0] === 'string') {
+ return $.map(suggestions, function (value) {
+ return { value: value, data: null };
+ });
+ }
+
+ return suggestions;
+ },
+
+ validateOrientation: function(orientation, fallback) {
+ orientation = $.trim(orientation || '').toLowerCase();
+
+ if($.inArray(orientation, ['auto', 'bottom', 'top']) === -1){
+ orientation = fallback;
+ }
+
+ return orientation;
+ },
+
+ processResponse: function (result, originalQuery, cacheKey) {
+ var that = this,
+ options = that.options;
+
+ result.suggestions = that.verifySuggestionsFormat(result.suggestions);
+
+ // Cache results if cache is not disabled:
+ if (!options.noCache) {
+ that.cachedResponse[cacheKey] = result;
+ if (options.preventBadQueries && result.suggestions.length === 0) {
+ that.badQueries.push(originalQuery);
+ }
+ }
+
+ // Return if originalQuery is not matching current query:
+ if (originalQuery !== that.getQuery(that.currentValue)) {
+ return;
+ }
+
+ that.suggestions = result.suggestions;
+ that.suggest();
+ },
+
+ activate: function (index) {
+ var that = this,
+ activeItem,
+ selected = that.classes.selected,
+ container = $(that.suggestionsContainer),
+ children = container.find('.' + that.classes.suggestion);
+
+ container.find('.' + selected).removeClass(selected);
+
+ that.selectedIndex = index;
+
+ if (that.selectedIndex !== -1 && children.length > that.selectedIndex) {
+ activeItem = children.get(that.selectedIndex);
+ $(activeItem).addClass(selected);
+ return activeItem;
+ }
+
+ return null;
+ },
+
+ selectHint: function () {
+ var that = this,
+ i = $.inArray(that.hint, that.suggestions);
+
+ that.select(i);
+ },
+
+ select: function (i) {
+ var that = this;
+ that.hide();
+ that.onSelect(i);
+ },
+
+ moveUp: function () {
+ var that = this;
+
+ if (that.selectedIndex === -1) {
+ return;
+ }
+
+ if (that.selectedIndex === 0) {
+ $(that.suggestionsContainer).children().first().removeClass(that.classes.selected);
+ that.selectedIndex = -1;
+ that.el.val(that.currentValue);
+ that.findBestHint();
+ return;
+ }
+
+ that.adjustScroll(that.selectedIndex - 1);
+ },
+
+ moveDown: function () {
+ var that = this;
+
+ if (that.selectedIndex === (that.suggestions.length - 1)) {
+ return;
+ }
+
+ that.adjustScroll(that.selectedIndex + 1);
+ },
+
+ adjustScroll: function (index) {
+ var that = this,
+ activeItem = that.activate(index);
+
+ if (!activeItem) {
+ return;
+ }
+
+ var offsetTop,
+ upperBound,
+ lowerBound,
+ heightDelta = $(activeItem).outerHeight();
+
+ offsetTop = activeItem.offsetTop;
+ upperBound = $(that.suggestionsContainer).scrollTop();
+ lowerBound = upperBound + that.options.maxHeight - heightDelta;
+
+ if (offsetTop < upperBound) {
+ $(that.suggestionsContainer).scrollTop(offsetTop);
+ } else if (offsetTop > lowerBound) {
+ $(that.suggestionsContainer).scrollTop(offsetTop - that.options.maxHeight + heightDelta);
+ }
+
+ if (!that.options.preserveInput) {
+ that.el.val(that.getValue(that.suggestions[index].value));
+ }
+ that.signalHint(null);
+ },
+
+ onSelect: function (index) {
+ var that = this,
+ onSelectCallback = that.options.onSelect,
+ suggestion = that.suggestions[index];
+
+ that.currentValue = that.getValue(suggestion.value);
+
+ if (that.currentValue !== that.el.val() && !that.options.preserveInput) {
+ that.el.val(that.currentValue);
+ }
+
+ that.signalHint(null);
+ that.suggestions = [];
+ that.selection = suggestion;
+
+ if ($.isFunction(onSelectCallback)) {
+ onSelectCallback.call(that.element, suggestion);
+ }
+ },
+
+ getValue: function (value) {
+ var that = this,
+ delimiter = that.options.delimiter,
+ currentValue,
+ parts;
+
+ if (!delimiter) {
+ return value;
+ }
+
+ currentValue = that.currentValue;
+ parts = currentValue.split(delimiter);
+
+ if (parts.length === 1) {
+ return value;
+ }
+
+ return currentValue.substr(0, currentValue.length - parts[parts.length - 1].length) + value;
+ },
+
+ dispose: function () {
+ var that = this;
+ that.el.off('.autocomplete').removeData('autocomplete');
+ that.disableKillerFn();
+ $(window).off('resize.autocomplete', that.fixPositionCapture);
+ $(that.suggestionsContainer).remove();
+ }
+ };
+
+ // Create chainable jQuery plugin:
+ $.fn.autocomplete = $.fn.devbridgeAutocomplete = function (options, args) {
+ var dataKey = 'autocomplete';
+ // If function invoked without argument return
+ // instance of the first matched element:
+ if (arguments.length === 0) {
+ return this.first().data(dataKey);
+ }
+
+ return this.each(function () {
+ var inputElement = $(this),
+ instance = inputElement.data(dataKey);
+
+ if (typeof options === 'string') {
+ if (instance && typeof instance[options] === 'function') {
+ instance[options](args);
+ }
+ } else {
+ // If instance already exists, destroy it:
+ if (instance && instance.dispose) {
+ instance.dispose();
+ }
+ instance = new Autocomplete(this, options);
+ inputElement.data(dataKey, instance);
+ }
+ });
+ };
+}));
diff --git a/asset/static/js/bootstrap.min.js b/asset/static/js/bootstrap.min.js
new file mode 100755
index 0000000..e79c065
--- /dev/null
+++ b/asset/static/js/bootstrap.min.js
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.3.6 (http://getbootstrap.com)
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under the MIT license
+ */
+if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1||b[0]>2)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 3")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.handler.apply(this,arguments):void 0}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.6",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a(f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.6",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==c.prop("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&&c.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var 
[minified source elided: remainder of the vendored Bootstrap v3.3.6 plugin bundle (button, carousel, collapse, dropdown, modal, tooltip, popover, scrollspy, tab, affix); several spans of the minified text are unrecoverably garbled]
\ No newline at end of file
diff --git a/asset/static/js/calendar/fullcalendar.min.js b/asset/static/js/calendar/fullcalendar.min.js
new file mode 100755
index 0000000..5c3cce2
--- /dev/null
+++ b/asset/static/js/calendar/fullcalendar.min.js
@@ -0,0 +1,9 @@
+/*!
+ * FullCalendar v2.6.1
+ * Docs & License: http://fullcalendar.io/
+ * (c) 2015 Adam Shaw
+ */
+!function(a){"function"==typeof define&&define.amd?define(["jquery","moment"],a):"object"==typeof exports?module.exports=a(require("jquery"),require("moment")):a(jQuery,moment)}(function(a,b){function c(a){return Q(a,Ra)}function d(b){var c,d={views:b.views||{}};return a.each(b,function(b,e){"views"!=b&&(a.isPlainObject(e)&&!/(time|duration|interval)$/i.test(b)&&-1==a.inArray(b,Ra)?(c=null,a.each(e,function(a,e){/^(month|week|day|default|basic(Week|Day)?|agenda(Week|Day)?)$/.test(a)?(d.views[a]||(d.views[a]={}),d.views[a][b]=e):(c||(c={}),c[a]=e)}),c&&(d[b]=c)):d[b]=e)}),d}function e(a,b){b.left&&a.css({"border-left-width":1,"margin-left":b.left-1}),b.right&&a.css({"border-right-width":1,"margin-right":b.right-1})}function f(a){a.css({"margin-left":"","margin-right":"","border-left-width":"","border-right-width":""})}function g(){a("body").addClass("fc-not-allowed")}function h(){a("body").removeClass("fc-not-allowed")}function i(b,c,d){var e=Math.floor(c/b.length),f=Math.floor(c-e*(b.length-1)),g=[],h=[],i=[],k=0;j(b),b.each(function(c,d){var j=c===b.length-1?f:e,l=a(d).outerHeight(!0);j>l?(g.push(d),h.push(l),i.push(a(d).height())):k+=l}),d&&(c-=k,e=Math.floor(c/g.length),f=Math.floor(c-e*(g.length-1))),a(g).each(function(b,c){var d=b===g.length-1?f:e,j=h[b],k=i[b],l=d-(j-k);d>j&&a(c).height(l)})}function j(a){a.height("")}function k(b){var c=0;return b.find("> span").each(function(b,d){var e=a(d).outerWidth();e>c&&(c=e)}),c++,b.width(c),c}function l(a,b){return a.height(b).addClass("fc-scroller"),a[0].scrollHeight-1>a[0].clientHeight?!0:(m(a),!1)}function m(a){a.height("").removeClass("fc-scroller")}function n(b){var c=b.css("position"),d=b.parents().filter(function(){var b=a(this);return/(auto|scroll)/.test(b.css("overflow")+b.css("overflow-y")+b.css("overflow-x"))}).eq(0);return"fixed"!==c&&d.length?d:a(b[0].ownerDocument||document)}function o(a){var b=a.offset();return{left:b.left,right:b.left+a.outerWidth(),top:b.top,bottom:b.top+a.outerHeight()}}function p(a){var b=a.offset(),c=r(a),d=b.left+u(a,"border-left-width")+c.left,e=b.top+u(a,"border-top-width")+c.top;return{left:d,right:d+a[0].clientWidth,top:e,bottom:e+a[0].clientHeight}}function q(a){var b=a.offset(),c=b.left+u(a,"border-left-width")+u(a,"padding-left"),d=b.top+u(a,"border-top-width")+u(a,"padding-top");return{left:c,right:c+a.width(),top:d,bottom:d+a.height()}}function r(a){var b=a.innerWidth()-a[0].clientWidth,c={left:0,right:0,top:0,bottom:a.innerHeight()-a[0].clientHeight};return s()&&"rtl"==a.css("direction")?c.left=b:c.right=b,c}function s(){return null===Sa&&(Sa=t()),Sa}function t(){var b=a("").css({position:"absolute",top:-1e3,left:0,border:0,padding:0,overflow:"scroll",direction:"rtl"}).appendTo("body"),c=b.children(),d=c.offset().left>b.offset().left;return b.remove(),d}function u(a,b){return parseFloat(a.css(b))||0}function v(a){return 1==a.which&&!a.ctrlKey}function w(a,b){var c={left:Math.max(a.left,b.left),right:Math.min(a.right,b.right),top:Math.max(a.top,b.top),bottom:Math.min(a.bottom,b.bottom)};return c.lefti&&j>g?(g>=i?(c=g.clone(),e=!0):(c=i.clone(),e=!1),j>=h?(d=h.clone(),f=!0):(d=j.clone(),f=!1),{start:c,end:d,isStart:e,isEnd:f}):void 0}function F(a,c){return b.duration({days:a.clone().stripTime().diff(c.clone().stripTime(),"days"),ms:a.time()-c.time()})}function G(a,c){return b.duration({days:a.clone().stripTime().diff(c.clone().stripTime(),"days")})}function H(a,c,d){return b.duration(Math.round(a.diff(c,d,!0)),d)}function I(a,b){var c,d,e;for(c=0;c=1&&ba(e)));c++);return d}function 
J(a,c,d){return null!=d?d.diff(c,a,!0):b.isDuration(c)?c.as(a):c.end.diff(c.start,a,!0)}function K(a,b,c){var d;return N(c)?(b-a)/c:(d=c.asMonths(),Math.abs(d)>=1&&ba(d)?b.diff(a,"months",!0)/d:b.diff(a,"days",!0)/c.asDays())}function L(a,b){var c,d;return N(a)||N(b)?a/b:(c=a.asMonths(),d=b.asMonths(),Math.abs(c)>=1&&ba(c)&&Math.abs(d)>=1&&ba(d)?c/d:a.asDays()/b.asDays())}function M(a,c){var d;return N(a)?b.duration(a*c):(d=a.asMonths(),Math.abs(d)>=1&&ba(d)?b.duration({months:d*c}):b.duration({days:a.asDays()*c}))}function N(a){return Boolean(a.hours()||a.minutes()||a.seconds()||a.milliseconds())}function O(a){return"[object Date]"===Object.prototype.toString.call(a)||a instanceof Date}function P(a){return/^\d+\:\d+(?:\:\d+\.?(?:\d{3})?)?$/.test(a)}function Q(a,b){var c,d,e,f,g,h,i={};if(b)for(c=0;c=0;f--)if(g=a[f][d],"object"==typeof g)e.unshift(g);else if(void 0!==g){i[d]=g;break}e.length&&(i[d]=Q(e))}for(c=a.length-1;c>=0;c--){h=a[c];for(d in h)d in i||(i[d]=h[d])}return i}function R(a){var b=function(){};return b.prototype=a,new b}function S(a,b){for(var c in a)U(a,c)&&(b[c]=a[c])}function T(a,b){var c,d,e=["constructor","toString","valueOf"];for(c=0;c /g,">").replace(/'/g,"'").replace(/"/g,""").replace(/\n/g," ")}function Z(a){return a.replace(/&.*?;/g,"")}function $(b){var c=[];return a.each(b,function(a,b){null!=b&&c.push(a+":"+b)}),c.join(";")}function _(a){return a.charAt(0).toUpperCase()+a.slice(1)}function aa(a,b){return a-b}function ba(a){return a%1===0}function ca(a,b){var c=a[b];return function(){return c.apply(a,arguments)}}function da(a,b){var c,d,e,f,g=function(){var h=+new Date-f;b>h&&h>0?c=setTimeout(g,b-h):(c=null,a.apply(e,d),c||(e=d=null))};return function(){e=this,d=arguments,f=+new Date,c||(c=setTimeout(g,b))}}function ea(c,d,e){var f,g,h,i,j=c[0],k=1==c.length&&"string"==typeof j;return b.isMoment(j)?(i=b.apply(null,c),ga(j,i)):O(j)||void 0===j?i=b.apply(null,c):(f=!1,g=!1,k?Za.test(j)?(j+="-01",c=[j],f=!0,g=!0):(h=$a.exec(j))&&(f=!h[5],g=!0):a.isArray(j)&&(g=!0),i=d||f?b.utc.apply(b,c):b.apply(null,c),f?(i._ambigTime=!0,i._ambigZone=!0):e&&(g?i._ambigZone=!0:k&&(i.utcOffset?i.utcOffset(j):i.zone(j)))),i._fullCalendar=!0,i}function fa(a,c){var d,e,f=!1,g=!1,h=a.length,i=[];for(d=0;h>d;d++)e=a[d],b.isMoment(e)||(e=Pa.moment.parseZone(e)),f=f||e._ambigTime,g=g||e._ambigZone,i.push(e);for(d=0;h>d;d++)e=i[d],c||!f||e._ambigTime?g&&!e._ambigZone&&(i[d]=e.clone().stripZone()):i[d]=e.clone().stripTime();return i}function ga(a,b){a._ambigTime?b._ambigTime=!0:b._ambigTime&&(b._ambigTime=!1),a._ambigZone?b._ambigZone=!0:b._ambigZone&&(b._ambigZone=!1)}function ha(a,b){a.year(b[0]||0).month(b[1]||0).date(b[2]||0).hours(b[3]||0).minutes(b[4]||0).seconds(b[5]||0).milliseconds(b[6]||0)}function ia(a,b){return ab.format.call(a,b)}function ja(a,b){return ka(a,pa(b))}function ka(a,b){var c,d="";for(c=0;cg&&(f=oa(a,b,j,k,c[h]),f!==!1);h--)m=f+m;for(i=g;h>=i;i++)n+=la(a,c[i]),o+=la(b,c[i]);return(n||o)&&(p=e?o+d+n:n+d+o),l+p+m}function oa(a,b,c,d,e){var f,g;return"string"==typeof e?e:(f=e.token)&&(g=cb[f.charAt(0)],g&&c.isSame(d,g))?ia(a,f):!1}function pa(a){return a in db?db[a]:db[a]=qa(a)}function qa(a){for(var b,c=[],d=/\[([^\]]*)\]|\(([^\)]*)\)|(LTS|LT|(\w)\4*o?)|([^\w\[\(]+)/g;b=d.exec(a);)b[1]?c.push(b[1]):b[2]?c.push({maybe:qa(b[2])}):b[3]?c.push({token:b[3]}):b[5]&&c.push(b[5]);return c}function ra(){}function sa(a,b){var c;return U(b,"constructor")&&(c=b.constructor),"function"!=typeof 
c&&(c=b.constructor=function(){a.apply(this,arguments)}),c.prototype=R(a.prototype),S(b,c.prototype),T(b,c.prototype),S(a,c),c}function ta(a,b){S(b.prototype||b,a.prototype)}function ua(a,b){return a||b?a&&b?a.component===b.component&&va(a,b)&&va(b,a):!1:!0}function va(a,b){for(var c in a)if(!/^(component|left|right|top|bottom)$/.test(c)&&a[c]!==b[c])return!1;return!0}function wa(a){var b=ya(a);return"background"===b||"inverse-background"===b}function xa(a){return"inverse-background"===ya(a)}function ya(a){return X((a.source||{}).rendering,a.rendering)}function za(a){var b,c,d={};for(b=0;b=a.leftCol)return!0;return!1}function Da(a,b){return a.leftCol-b.leftCol}function Ea(a){var b,c,d,e=[];for(b=0;bb.top&&a.top ").prependTo(c),S=N.header=new Ma(N,O),T=S.render(),T&&c.prepend(T),i(O.defaultView),O.handleWindowResize&&(Y=da(m,O.windowResizeDelay),a(window).resize(Y))}function g(){W&&W.removeElement(),S.removeElement(),U.remove(),c.removeClass("fc fc-ltr fc-rtl fc-unthemed ui-widget"),Y&&a(window).unbind("resize",Y)}function h(){return c.is(":visible")}function i(b){ca++,W&&b&&W.type!==b&&(S.deactivateButton(W.type),H(),W.removeElement(),W=N.view=null),!W&&b&&(W=N.view=ba[b]||(ba[b]=N.instantiateView(b)),W.setElement(a("
").appendTo(U)),S.activateButton(b)),W&&(Z=W.massageCurrentDate(Z),W.displaying&&Z.isWithin(W.intervalStart,W.intervalEnd)||h()&&(W.display(Z),I(),u(),v(),q())),I(),ca--}function j(a){return h()?(a&&l(),ca++,W.updateSize(!0),ca--,!0):void 0}function k(){h()&&l()}function l(){X="number"==typeof O.contentHeight?O.contentHeight:"number"==typeof O.height?O.height-(T?T.outerHeight(!0):0):Math.round(U.width()/Math.max(O.aspectRatio,.5))}function m(a){!ca&&a.target===window&&W.start&&j(!0)&&W.trigger("windowResize",aa)}function n(){p(),r()}function o(){h()&&(H(),W.displayEvents(ea),I())}function p(){H(),W.clearEvents(),I()}function q(){!O.lazyFetching||$(W.start,W.end)?r():o()}function r(){_(W.start,W.end)}function s(a){ea=a,o()}function t(){o()}function u(){S.updateTitle(W.title)}function v(){var a=N.getNow();a.isWithin(W.intervalStart,W.intervalEnd)?S.disableButton("today"):S.enableButton("today")}function w(a,b){W.select(N.buildSelectSpan.apply(N,arguments))}function x(){W&&W.unselect()}function y(){Z=W.computePrevDate(Z),i()}function z(){Z=W.computeNextDate(Z),i()}function A(){Z.add(-1,"years"),i()}function B(){Z.add(1,"years"),i()}function C(){Z=N.getNow(),i()}function D(a){Z=N.moment(a).stripZone(),i()}function E(a){Z.add(b.duration(a)),i()}function F(a,b){var c;b=b||"day",c=N.getViewSpec(b)||N.getUnitViewSpec(b),Z=a.clone(),i(c?c.type:null)}function G(){return N.applyTimezone(Z)}function H(){U.css({width:"100%",height:U.height(),overflow:"hidden"})}function I(){U.css({width:"",height:"",overflow:""})}function J(){return N}function K(){return W}function L(a,b){return void 0===b?O[a]:void(("height"==a||"contentHeight"==a||"aspectRatio"==a)&&(O[a]=b,j(!0)))}function M(a,b){var c=Array.prototype.slice.call(arguments,2);return b=b||aa,this.triggerWith(a,b,c),O[a]?O[a].apply(b,c):void 0}var N=this;N.initOptions(d||{});var O=this.options;N.render=e,N.destroy=g,N.refetchEvents=n,N.reportEvents=s,N.reportEventChange=t,N.rerenderEvents=o,N.changeView=i,N.select=w,N.unselect=x,N.prev=y,N.next=z,N.prevYear=A,N.nextYear=B,N.today=C,N.gotoDate=D,N.incrementDate=E,N.zoomTo=F,N.getDate=G,N.getCalendar=J,N.getView=K,N.option=L,N.trigger=M;var P=R(La(O.lang));if(O.monthNames&&(P._months=O.monthNames),O.monthNamesShort&&(P._monthsShort=O.monthNamesShort),O.dayNames&&(P._weekdays=O.dayNames),O.dayNamesShort&&(P._weekdaysShort=O.dayNamesShort),null!=O.firstDay){var Q=R(P._week);Q.dow=O.firstDay,P._week=Q}P._fullCalendar_weekCalc=function(a){return"function"==typeof a?a:"local"===a?a:"iso"===a||"ISO"===a?"ISO":void 0}(O.weekNumberCalculation),N.defaultAllDayEventDuration=b.duration(O.defaultAllDayEventDuration),N.defaultTimedEventDuration=b.duration(O.defaultTimedEventDuration),N.moment=function(){var a;return"local"===O.timezone?(a=Pa.moment.apply(null,arguments),a.hasTime()&&a.local()):a="UTC"===O.timezone?Pa.moment.utc.apply(null,arguments):Pa.moment.parseZone.apply(null,arguments),"_locale"in a?a._locale=P:a._lang=P,a},N.getIsAmbigTimezone=function(){return"local"!==O.timezone&&"UTC"!==O.timezone},N.applyTimezone=function(a){if(!a.hasTime())return a.clone();var b,c=N.moment(a.toArray()),d=a.time()-c.time();return d&&(b=c.clone().add(d),a.time()-b.time()===0&&(c=b)),c},N.getNow=function(){var a=O.now;return"function"==typeof a&&(a=a()),N.moment(a).stripZone()},N.getEventEnd=function(a){return a.end?a.end.clone():N.getDefaultEventEnd(a.allDay,a.start)},N.getDefaultEventEnd=function(a,b){var c=b.clone();return 
a?c.stripTime().add(N.defaultAllDayEventDuration):c.add(N.defaultTimedEventDuration),N.getIsAmbigTimezone()&&c.stripZone(),c},N.humanizeDuration=function(a){return(a.locale||a.lang).call(a,O.lang).humanize()},Na.call(N,O);var S,T,U,V,W,X,Y,Z,$=N.isFetchNeeded,_=N.fetchEvents,aa=c[0],ba={},ca=0,ea=[];Z=null!=O.defaultDate?N.moment(O.defaultDate).stripZone():N.getNow(),N.getSuggestedViewHeight=function(){return void 0===X&&k(),X},N.isHeightAuto=function(){return"auto"===O.contentHeight||"auto"===O.height},N.freezeContentHeight=H,N.unfreezeContentHeight=I,N.initialize()}function Ka(b){a.each(tb,function(a,c){null==b[a]&&(b[a]=c(b))})}function La(a){var c=b.localeData||b.langData;return c.call(b,a)||c.call(b,"en")}function Ma(b,c){function d(){var b=c.header;return n=c.theme?"ui":"fc",b?o=a("
").append(f("left")).append(f("right")).append(f("center")).append('
'):void 0}function e(){o.remove(),o=a()}function f(d){var e=a('
'),f=c.header[d];return f&&a.each(f.split(" "),function(d){var f,g=a(),h=!0;a.each(this.split(","),function(d,e){var f,i,j,k,l,m,o,q,r,s;"title"==e?(g=g.add(a(" ")),h=!1):((f=(b.options.customButtons||{})[e])?(j=function(a){f.click&&f.click.call(s[0],a)},k="",l=f.text):(i=b.getViewSpec(e))?(j=function(){b.changeView(e)},p.push(e),k=i.buttonTextOverride,l=i.buttonTextDefault):b[e]&&(j=function(){b[e]()},k=(b.overrides.buttonText||{})[e],l=c.buttonText[e]),j&&(m=f?f.themeIcon:c.themeButtonIcons[e],o=f?f.icon:c.buttonIcons[e],q=k?Y(k):m&&c.theme?" ":o&&!c.theme?" ":Y(l),r=["fc-"+e+"-button",n+"-button",n+"-state-default"],s=a(''+q+" ").click(function(a){s.hasClass(n+"-state-disabled")||(j(a),(s.hasClass(n+"-state-active")||s.hasClass(n+"-state-disabled"))&&s.removeClass(n+"-state-hover"))}).mousedown(function(){s.not("."+n+"-state-active").not("."+n+"-state-disabled").addClass(n+"-state-down")}).mouseup(function(){s.removeClass(n+"-state-down")}).hover(function(){s.not("."+n+"-state-active").not("."+n+"-state-disabled").addClass(n+"-state-hover")},function(){s.removeClass(n+"-state-hover").removeClass(n+"-state-down")}),g=g.add(s)))}),h&&g.first().addClass(n+"-corner-left").end().last().addClass(n+"-corner-right").end(),g.length>1?(f=a("
"),h&&f.addClass("fc-button-group"),f.append(g),e.append(f)):e.append(g)}),e}function g(a){o.find("h2").text(a)}function h(a){o.find(".fc-"+a+"-button").addClass(n+"-state-active")}function i(a){o.find(".fc-"+a+"-button").removeClass(n+"-state-active")}function j(a){o.find(".fc-"+a+"-button").attr("disabled","disabled").addClass(n+"-state-disabled")}function k(a){o.find(".fc-"+a+"-button").removeAttr("disabled").removeClass(n+"-state-disabled")}function l(){return p}var m=this;m.render=d,m.removeElement=e,m.updateTitle=g,m.activateButton=h,m.deactivateButton=i,m.disableButton=j,m.enableButton=k,m.getViewsWithButtons=l;var n,o=a(),p=[]}function Na(c){function d(a,b){return!L||L>a||b>M}function e(a,b){L=a,M=b,T=[];var c=++R,d=Q.length;S=d;for(var e=0;d>e;e++)f(Q[e],c)}function f(b,c){g(b,function(d){var e,f,g,h=a.isArray(b.events);if(c==R){if(d)for(e=0;e=c&&b.end<=d}function J(a,b){var c=a.start.clone().stripZone(),d=K.getEventEnd(a).stripZone();return b.startc}var K=this;K.isFetchNeeded=d,K.fetchEvents=e,K.addEventSource=h,K.removeEventSource=j,K.updateEvent=m,K.renderEvent=p,K.removeEvents=q,K.clientEvents=r,K.mutateEvent=x,K.normalizeEventDates=u,K.normalizeEventTimes=v;var L,M,N=K.reportEvents,O={events:[]},Q=[O],R=0,S=0,T=[];a.each((c.events?[c.events]:[]).concat(c.eventSources||[]),function(a,b){var c=i(b);c&&Q.push(c)}),K.getBusinessHoursEvents=z,K.isEventSpanAllowed=A,K.isExternalSpanAllowed=B,K.isSelectionSpanAllowed=C,K.getEventCache=function(){return T}}function Oa(a){a._allDay=a.allDay,a._start=a.start.clone(),a._end=a.end?a.end.clone():null}var Pa=a.fullCalendar={version:"2.6.1",internalApiVersion:3},Qa=Pa.views={};a.fn.fullCalendar=function(b){var c=Array.prototype.slice.call(arguments,1),d=this;return this.each(function(e,f){var g,h=a(f),i=h.data("fullCalendar");"string"==typeof b?i&&a.isFunction(i[b])&&(g=i[b].apply(i,c),e||(d=g),"destroy"===b&&h.removeData("fullCalendar")):i||(i=new pb(h,b),h.data("fullCalendar",i),i.render())}),d};var Ra=["header","buttonText","buttonIcons","themeButtonIcons"];Pa.intersectRanges=E,Pa.applyAll=W,Pa.debounce=da,Pa.isInt=ba,Pa.htmlEscape=Y,Pa.cssToStr=$,Pa.proxy=ca,Pa.capitaliseFirstLetter=_,Pa.getOuterRect=o,Pa.getClientRect=p,Pa.getContentRect=q,Pa.getScrollbarWidths=r;var Sa=null;Pa.intersectRects=w,Pa.parseFieldSpecs=A,Pa.compareByFieldSpecs=B,Pa.compareByFieldSpec=C,Pa.flexibleCompare=D,Pa.computeIntervalUnit=I,Pa.divideRangeByDuration=K,Pa.divideDurationByDuration=L,Pa.multiplyDuration=M,Pa.durationHasTime=N;var Ta=["sun","mon","tue","wed","thu","fri","sat"],Ua=["year","month","week","day","hour","minute","second","millisecond"];Pa.log=function(){var a=window.console;return a&&a.log?a.log.apply(a,arguments):void 0},Pa.warn=function(){var a=window.console;return a&&a.warn?a.warn.apply(a,arguments):Pa.log.apply(Pa,arguments)};var Va,Wa,Xa,Ya={}.hasOwnProperty,Za=/^\s*\d{4}-\d\d$/,$a=/^\s*\d{4}-(?:(\d\d-\d\d)|(W\d\d$)|(W\d\d-\d)|(\d\d\d))((T| )(\d\d(:\d\d(:\d\d(\.\d+)?)?)?)?)?$/,_a=b.fn,ab=a.extend({},_a);Pa.moment=function(){return ea(arguments)},Pa.moment.utc=function(){var a=ea(arguments,!0);return a.hasTime()&&a.utc(),a},Pa.moment.parseZone=function(){return ea(arguments,!0,!0)},_a.clone=function(){var a=ab.clone.apply(this,arguments);return ga(this,a),this._fullCalendar&&(a._fullCalendar=!0),a},_a.week=_a.weeks=function(a){var b=(this._locale||this._lang)._fullCalendar_weekCalc;return null==a&&"function"==typeof 
b?b(this):"ISO"===b?ab.isoWeek.apply(this,arguments):ab.week.apply(this,arguments)},_a.time=function(a){if(!this._fullCalendar)return ab.time.apply(this,arguments);if(null==a)return b.duration({hours:this.hours(),minutes:this.minutes(),seconds:this.seconds(),milliseconds:this.milliseconds()});this._ambigTime=!1,b.isDuration(a)||b.isMoment(a)||(a=b.duration(a));var c=0;return b.isDuration(a)&&(c=24*Math.floor(a.asDays())),this.hours(c+a.hours()).minutes(a.minutes()).seconds(a.seconds()).milliseconds(a.milliseconds())},_a.stripTime=function(){var a;return this._ambigTime||(a=this.toArray(),this.utc(),Wa(this,a.slice(0,3)),this._ambigTime=!0,this._ambigZone=!0),this},_a.hasTime=function(){return!this._ambigTime},_a.stripZone=function(){var a,b;return this._ambigZone||(a=this.toArray(),b=this._ambigTime,this.utc(),Wa(this,a),this._ambigTime=b||!1,this._ambigZone=!0),this},_a.hasZone=function(){return!this._ambigZone},_a.local=function(){var a=this.toArray(),b=this._ambigZone;return ab.local.apply(this,arguments),this._ambigTime=!1,this._ambigZone=!1,b&&Xa(this,a),this},_a.utc=function(){return ab.utc.apply(this,arguments),this._ambigTime=!1,this._ambigZone=!1,this},a.each(["zone","utcOffset"],function(a,b){ab[b]&&(_a[b]=function(a){return null!=a&&(this._ambigTime=!1,this._ambigZone=!1),ab[b].apply(this,arguments)})}),_a.format=function(){return this._fullCalendar&&arguments[0]?ja(this,arguments[0]):this._ambigTime?ia(this,"YYYY-MM-DD"):this._ambigZone?ia(this,"YYYY-MM-DD[T]HH:mm:ss"):ab.format.apply(this,arguments)},_a.toISOString=function(){return this._ambigTime?ia(this,"YYYY-MM-DD"):this._ambigZone?ia(this,"YYYY-MM-DD[T]HH:mm:ss"):ab.toISOString.apply(this,arguments)},_a.isWithin=function(a,b){var c=fa([this,a,b]);return c[0]>=c[1]&&c[0]a;a++)b=arguments[a],c-1>a&&ta(this,b);return sa(this,b||{})},ra.mixin=function(a){ta(this,a)};var eb=Pa.Emitter=ra.extend({callbackHash:null,on:function(a,b){return this.getCallbacks(a).add(b),this},off:function(a,b){return this.getCallbacks(a).remove(b),this},trigger:function(a){var b=Array.prototype.slice.call(arguments,1);return this.triggerWith(a,this,b),this},triggerWith:function(a,b,c){var d=this.getCallbacks(a);return d.fireWith(b,c),this},getCallbacks:function(b){var c;return this.callbackHash||(this.callbackHash={}),c=this.callbackHash[b],c||(c=this.callbackHash[b]=a.Callbacks()),c}}),fb=ra.extend({isHidden:!0,options:null,el:null,documentMousedownProxy:null,margin:10,constructor:function(a){this.options=a||{}},show:function(){this.isHidden&&(this.el||this.render(),this.el.show(),this.position(),this.isHidden=!1,this.trigger("show"))},hide:function(){this.isHidden||(this.el.hide(),this.isHidden=!0,this.trigger("hide"))},render:function(){var b=this,c=this.options;this.el=a('
').addClass(c.className||"").css({top:0,left:0}).append(c.content).appendTo(c.parentEl),this.el.on("click",".fc-close",function(){b.hide()}),c.autoHide&&a(document).on("mousedown",this.documentMousedownProxy=ca(this,"documentMousedown"))},documentMousedown:function(b){this.el&&!a(b.target).closest(this.el).length&&this.hide()},removeElement:function(){this.hide(),this.el&&(this.el.remove(),this.el=null),a(document).off("mousedown",this.documentMousedownProxy)},position:function(){var b,c,d,e,f,g=this.options,h=this.el.offsetParent().offset(),i=this.el.outerWidth(),j=this.el.outerHeight(),k=a(window),l=n(this.el);e=g.top||0,f=void 0!==g.left?g.left:void 0!==g.right?g.right-i:0,l.is(window)||l.is(document)?(l=k,b=0,c=0):(d=l.offset(),b=d.top,c=d.left),b+=k.scrollTop(),c+=k.scrollLeft(),g.viewportConstrain!==!1&&(e=Math.min(e,b+l.outerHeight()-j-this.margin),e=Math.max(e,b+this.margin),f=Math.min(f,c+l.outerWidth()-i-this.margin),
+f=Math.max(f,c+this.margin)),this.el.css({top:e-h.top,left:f-h.left})},trigger:function(a){this.options[a]&&this.options[a].apply(this,Array.prototype.slice.call(arguments,1))}}),gb=Pa.CoordCache=ra.extend({els:null,forcedOffsetParentEl:null,origin:null,boundingRect:null,isHorizontal:!1,isVertical:!1,lefts:null,rights:null,tops:null,bottoms:null,constructor:function(b){this.els=a(b.els),this.isHorizontal=b.isHorizontal,this.isVertical=b.isVertical,this.forcedOffsetParentEl=b.offsetParent?a(b.offsetParent):null},build:function(){var a=this.forcedOffsetParentEl||this.els.eq(0).offsetParent();this.origin=a.offset(),this.boundingRect=this.queryBoundingRect(),this.isHorizontal&&this.buildElHorizontals(),this.isVertical&&this.buildElVerticals()},clear:function(){this.origin=null,this.boundingRect=null,this.lefts=null,this.rights=null,this.tops=null,this.bottoms=null},ensureBuilt:function(){this.origin||this.build()},queryBoundingRect:function(){var a=n(this.els.eq(0));return a.is(document)?void 0:p(a)},buildElHorizontals:function(){var b=[],c=[];this.els.each(function(d,e){var f=a(e),g=f.offset().left,h=f.outerWidth();b.push(g),c.push(g+h)}),this.lefts=b,this.rights=c},buildElVerticals:function(){var b=[],c=[];this.els.each(function(d,e){var f=a(e),g=f.offset().top,h=f.outerHeight();b.push(g),c.push(g+h)}),this.tops=b,this.bottoms=c},getHorizontalIndex:function(a){this.ensureBuilt();var b,c=this.boundingRect,d=this.lefts,e=this.rights,f=d.length;if(!c||a>=c.left&&ab;b++)if(a>=d[b]&&a=c.top&&ab;b++)if(a>=d[b]&&a=b*b&&this.startDrag(a)),this.isDragging&&this.drag(d,e,a)},startDrag:function(a){this.isListening||this.startListening(),this.isDragging||(this.isDragging=!0,this.dragStart(a))},dragStart:function(a){var b=this.subjectEl;this.trigger("dragStart",a),(this.subjectHref=b?b.attr("href"):null)&&b.removeAttr("href")},drag:function(a,b,c){this.trigger("drag",a,b,c),this.updateScroll(c)},mouseup:function(a){this.stopListening(a)},stopDrag:function(a){this.isDragging&&(this.stopScrolling(),this.dragStop(a),this.isDragging=!1)},dragStop:function(a){var b=this;this.trigger("dragStop",a),setTimeout(function(){b.subjectHref&&b.subjectEl.attr("href",b.subjectHref)},0)},stopListening:function(b){this.stopDrag(b),this.isListening&&(this.scrollEl&&(this.scrollEl.off("scroll",this.scrollHandlerProxy),this.scrollHandlerProxy=null),a(document).off("mousemove",this.mousemoveProxy).off("mouseup",this.mouseupProxy).off("selectstart",this.preventDefault),this.mousemoveProxy=null,this.mouseupProxy=null,this.isListening=!1,this.listenStop(b))},listenStop:function(a){this.trigger("listenStop",a)},trigger:function(a){this.options[a]&&this.options[a].apply(this,Array.prototype.slice.call(arguments,1))},preventDefault:function(a){a.preventDefault()},computeScrollBounds:function(){var a=this.scrollEl;this.scrollBounds=a?o(a):null},updateScroll:function(a){var b,c,d,e,f=this.scrollSensitivity,g=this.scrollBounds,h=0,i=0;g&&(b=(f-(a.pageY-g.top))/f,c=(f-(g.bottom-a.pageY))/f,d=(f-(a.pageX-g.left))/f,e=(f-(g.right-a.pageX))/f,b>=0&&1>=b?h=b*this.scrollSpeed*-1:c>=0&&1>=c&&(h=c*this.scrollSpeed),d>=0&&1>=d?i=d*this.scrollSpeed*-1:e>=0&&1>=e&&(i=e*this.scrollSpeed)),this.setScrollVel(h,i)},setScrollVel:function(a,b){this.scrollTopVel=a,this.scrollLeftVel=b,this.constrainScrollVel(),!this.scrollTopVel&&!this.scrollLeftVel||this.scrollIntervalId||(this.scrollIntervalId=setInterval(ca(this,"scrollIntervalFunc"),this.scrollIntervalMs))},constrainScrollVel:function(){var 
a=this.scrollEl;this.scrollTopVel<0?a.scrollTop()<=0&&(this.scrollTopVel=0):this.scrollTopVel>0&&a.scrollTop()+a[0].clientHeight>=a[0].scrollHeight&&(this.scrollTopVel=0),this.scrollLeftVel<0?a.scrollLeft()<=0&&(this.scrollLeftVel=0):this.scrollLeftVel>0&&a.scrollLeft()+a[0].clientWidth>=a[0].scrollWidth&&(this.scrollLeftVel=0)},scrollIntervalFunc:function(){var a=this.scrollEl,b=this.scrollIntervalMs/1e3;this.scrollTopVel&&a.scrollTop(a.scrollTop()+this.scrollTopVel*b),this.scrollLeftVel&&a.scrollLeft(a.scrollLeft()+this.scrollLeftVel*b),this.constrainScrollVel(),this.scrollTopVel||this.scrollLeftVel||this.stopScrolling()},stopScrolling:function(){this.scrollIntervalId&&(clearInterval(this.scrollIntervalId),this.scrollIntervalId=null,this.scrollStop())},scrollHandler:function(){this.scrollIntervalId||this.scrollStop()},scrollStop:function(){}}),ib=hb.extend({component:null,origHit:null,hit:null,coordAdjust:null,constructor:function(a,b){hb.call(this,b),this.component=a},listenStart:function(a){var b,c,d,e=this.subjectEl;hb.prototype.listenStart.apply(this,arguments),this.computeCoords(),a?(c={left:a.pageX,top:a.pageY},d=c,e&&(b=o(e),d=x(d,b)),this.origHit=this.queryHit(d.left,d.top),e&&this.options.subjectCenter&&(this.origHit&&(b=w(this.origHit,b)||b),d=y(b)),this.coordAdjust=z(d,c)):(this.origHit=null,this.coordAdjust=null)},computeCoords:function(){this.component.prepareHits(),this.computeScrollBounds()},dragStart:function(a){var b;hb.prototype.dragStart.apply(this,arguments),b=this.queryHit(a.pageX,a.pageY),b&&this.hitOver(b)},drag:function(a,b,c){var d;hb.prototype.drag.apply(this,arguments),d=this.queryHit(c.pageX,c.pageY),ua(d,this.hit)||(this.hit&&this.hitOut(),d&&this.hitOver(d))},dragStop:function(){this.hitDone(),hb.prototype.dragStop.apply(this,arguments)},hitOver:function(a){var b=ua(a,this.origHit);this.hit=a,this.trigger("hitOver",this.hit,b,this.origHit)},hitOut:function(){this.hit&&(this.trigger("hitOut",this.hit),this.hitDone(),this.hit=null)},hitDone:function(){this.hit&&this.trigger("hitDone",this.hit)},listenStop:function(){hb.prototype.listenStop.apply(this,arguments),this.origHit=null,this.hit=null,this.component.releaseHits()},scrollStop:function(){hb.prototype.scrollStop.apply(this,arguments),this.computeCoords()},queryHit:function(a,b){return this.coordAdjust&&(a+=this.coordAdjust.left,b+=this.coordAdjust.top),this.component.queryHit(a,b)}}),jb=ra.extend({options:null,sourceEl:null,el:null,parentEl:null,top0:null,left0:null,mouseY0:null,mouseX0:null,topDelta:null,leftDelta:null,mousemoveProxy:null,isFollowing:!1,isHidden:!1,isAnimating:!1,constructor:function(b,c){this.options=c=c||{},this.sourceEl=b,this.parentEl=c.parentEl?a(c.parentEl):b.parent()},start:function(b){this.isFollowing||(this.isFollowing=!0,this.mouseY0=b.pageY,this.mouseX0=b.pageX,this.topDelta=0,this.leftDelta=0,this.isHidden||this.updatePosition(),a(document).on("mousemove",this.mousemoveProxy=ca(this,"mousemove")))},stop:function(b,c){function d(){this.isAnimating=!1,e.removeElement(),this.top0=this.left0=null,c&&c()}var e=this,f=this.options.revertDuration;this.isFollowing&&!this.isAnimating&&(this.isFollowing=!1,a(document).off("mousemove",this.mousemoveProxy),b&&f&&!this.isHidden?(this.isAnimating=!0,this.el.animate({top:this.top0,left:this.left0},{duration:f,complete:d})):d())},getEl:function(){var a=this.el;return 
a||(this.sourceEl.width(),a=this.el=this.sourceEl.clone().css({position:"absolute",visibility:"",display:this.isHidden?"none":"",margin:0,right:"auto",bottom:"auto",width:this.sourceEl.width(),height:this.sourceEl.height(),opacity:this.options.opacity||"",zIndex:this.options.zIndex}).appendTo(this.parentEl)),a},removeElement:function(){this.el&&(this.el.remove(),this.el=null)},updatePosition:function(){var a,b;this.getEl(),null===this.top0&&(this.sourceEl.width(),a=this.sourceEl.offset(),b=this.el.offsetParent().offset(),this.top0=a.top-b.top,this.left0=a.left-b.left),this.el.css({top:this.top0+this.topDelta,left:this.left0+this.leftDelta})},mousemove:function(a){this.topDelta=a.pageY-this.mouseY0,this.leftDelta=a.pageX-this.mouseX0,this.isHidden||this.updatePosition()},hide:function(){this.isHidden||(this.isHidden=!0,this.el&&this.el.hide())},show:function(){this.isHidden&&(this.isHidden=!1,this.updatePosition(),this.getEl().show())}}),kb=Pa.Grid=ra.extend({view:null,isRTL:null,start:null,end:null,el:null,elsByFill:null,externalDragStartProxy:null,eventTimeFormat:null,displayEventTime:null,displayEventEnd:null,minResizeDuration:null,largeUnit:null,constructor:function(a){this.view=a,this.isRTL=a.opt("isRTL"),this.elsByFill={},this.externalDragStartProxy=ca(this,"externalDragStart")},computeEventTimeFormat:function(){return this.view.opt("smallTimeFormat")},computeDisplayEventTime:function(){return!0},computeDisplayEventEnd:function(){return!0},setRange:function(a){this.start=a.start.clone(),this.end=a.end.clone(),this.rangeUpdated(),this.processRangeOptions()},rangeUpdated:function(){},processRangeOptions:function(){var a,b,c=this.view;this.eventTimeFormat=c.opt("eventTimeFormat")||c.opt("timeFormat")||this.computeEventTimeFormat(),a=c.opt("displayEventTime"),null==a&&(a=this.computeDisplayEventTime()),b=c.opt("displayEventEnd"),null==b&&(b=this.computeDisplayEventEnd()),this.displayEventTime=a,this.displayEventEnd=b},spanToSegs:function(a){},diffDates:function(a,b){return this.largeUnit?H(a,b,this.largeUnit):F(a,b)},prepareHits:function(){},releaseHits:function(){},queryHit:function(a,b){},getHitSpan:function(a){},getHitEl:function(a){},setElement:function(b){var c=this;this.el=b,b.on("mousedown",function(b){a(b.target).is(".fc-event-container *, .fc-more")||a(b.target).closest(".fc-popover").length||c.dayMousedown(b)}),this.bindSegHandlers(),this.bindGlobalHandlers()},removeElement:function(){this.unbindGlobalHandlers(),this.el.remove()},renderSkeleton:function(){},renderDates:function(){},unrenderDates:function(){},bindGlobalHandlers:function(){a(document).on("dragstart sortstart",this.externalDragStartProxy)},unbindGlobalHandlers:function(){a(document).off("dragstart sortstart",this.externalDragStartProxy)},dayMousedown:function(a){var b,c,d=this,e=this.view,f=e.opt("selectable"),i=new ib(this,{scroll:e.opt("dragScroll"),dragStart:function(){e.unselect()},hitOver:function(a,e,h){h&&(b=e?a:null,f&&(c=d.computeSelection(d.getHitSpan(h),d.getHitSpan(a)),c?d.renderSelection(c):c===!1&&g()))},hitOut:function(){b=null,c=null,d.unrenderSelection(),h()},listenStop:function(a){b&&e.triggerDayClick(d.getHitSpan(b),d.getHitEl(b),a),c&&e.reportSelection(c,a),h()}});i.mousedown(a)},renderEventLocationHelper:function(a,b){var c=this.fabricateHelperEvent(a,b);this.renderHelper(c,b)},fabricateHelperEvent:function(a,b){var c=b?R(b.event):{};return 
c.start=a.start.clone(),c.end=a.end?a.end.clone():null,c.allDay=null,this.view.calendar.normalizeEventDates(c),c.className=(c.className||[]).concat("fc-helper"),b||(c.editable=!1),c},renderHelper:function(a,b){},unrenderHelper:function(){},renderSelection:function(a){this.renderHighlight(a)},unrenderSelection:function(){this.unrenderHighlight()},computeSelection:function(a,b){var c=this.computeSelectionSpan(a,b);return c&&!this.view.calendar.isSelectionSpanAllowed(c)?!1:c},computeSelectionSpan:function(a,b){var c=[a.start,a.end,b.start,b.end];return c.sort(aa),{start:c[0].clone(),end:c[3].clone()}},renderHighlight:function(a){this.renderFill("highlight",this.spanToSegs(a))},unrenderHighlight:function(){this.unrenderFill("highlight")},highlightSegClasses:function(){return["fc-highlight"]},renderBusinessHours:function(){},unrenderBusinessHours:function(){},getNowIndicatorUnit:function(){},renderNowIndicator:function(a){},unrenderNowIndicator:function(){},renderFill:function(a,b){},unrenderFill:function(a){var b=this.elsByFill[a];b&&(b.remove(),delete this.elsByFill[a])},renderFillSegEls:function(b,c){var d,e=this,f=this[b+"SegEl"],g="",h=[];if(c.length){for(d=0;d "},getDayClasses:function(a){var b=this.view,c=b.calendar.getNow(),d=["fc-"+Ta[a.day()]];return 1==b.intervalDuration.as("months")&&a.month()!=b.intervalStart.month()&&d.push("fc-other-month"),a.isSame(c,"day")?d.push("fc-today",b.highlightStateClass):c>a?d.push("fc-past"):d.push("fc-future"),d}});kb.mixin({mousedOverSeg:null,isDraggingSeg:!1,isResizingSeg:!1,isDraggingExternal:!1,segs:null,renderEvents:function(a){var b,c=[],d=[];for(b=0;b *",function(c){var e=a(this).data("fc-seg");return!e||b.isDraggingSeg||b.isResizingSeg?void 0:d.call(this,e,c)})})},triggerSegMouseover:function(a,b){this.mousedOverSeg||(this.mousedOverSeg=a,this.view.trigger("eventMouseover",a.el[0],a.event,b))},triggerSegMouseout:function(a,b){b=b||{},this.mousedOverSeg&&(a=a||this.mousedOverSeg,this.mousedOverSeg=null,this.view.trigger("eventMouseout",a.el[0],a.event,b))},segDragMousedown:function(a,b){var c,d=this,e=this.view,f=e.calendar,i=a.el,j=a.event,k=new jb(a.el,{parentEl:e.el,opacity:e.opt("dragOpacity"),revertDuration:e.opt("dragRevertDuration"),zIndex:2}),l=new ib(e,{distance:5,scroll:e.opt("dragScroll"),subjectEl:i,subjectCenter:!0,listenStart:function(a){k.hide(),k.start(a)},dragStart:function(b){d.triggerSegMouseout(a,b),d.segDragStart(a,b),e.hideEvent(j)},hitOver:function(b,h,i){a.hit&&(i=a.hit),c=d.computeEventDrop(i.component.getHitSpan(i),b.component.getHitSpan(b),j),c&&!f.isEventSpanAllowed(d.eventToSpan(c),j)&&(g(),c=null),c&&e.renderDrag(c,a)?k.hide():k.show(),h&&(c=null)},hitOut:function(){e.unrenderDrag(),k.show(),c=null},hitDone:function(){h()},dragStop:function(b){k.stop(!c,function(){e.unrenderDrag(),e.showEvent(j),d.segDragStop(a,b),c&&e.reportEventDrop(j,c,this.largeUnit,i,b)})},listenStop:function(){k.stop()}});l.mousedown(b)},segDragStart:function(a,b){this.isDraggingSeg=!0,this.view.trigger("eventDragStart",a.el[0],a.event,b,{})},segDragStop:function(a,b){this.isDraggingSeg=!1,this.view.trigger("eventDragStop",a.el[0],a.event,b,{})},computeEventDrop:function(a,b,c){var d,e,f=this.view.calendar,g=a.start,h=b.start;return 
g.hasTime()===h.hasTime()?(d=this.diffDates(h,g),c.allDay&&N(d)?(e={start:c.start.clone(),end:f.getEventEnd(c),allDay:!1},f.normalizeEventTimes(e)):e={start:c.start.clone(),end:c.end?c.end.clone():null,allDay:c.allDay},e.start.add(d),e.end&&e.end.add(d)):e={start:h.clone(),end:null,allDay:!h.hasTime()},e},applyDragOpacity:function(a){var b=this.view.opt("dragOpacity");null!=b&&a.each(function(a,c){c.style.opacity=b})},externalDragStart:function(b,c){var d,e,f=this.view;f.opt("droppable")&&(d=a((c?c.item:null)||b.target),e=f.opt("dropAccept"),(a.isFunction(e)?e.call(d[0],d):d.is(e))&&(this.isDraggingExternal||this.listenToExternalDrag(d,b,c)))},listenToExternalDrag:function(a,b,c){var d,e=this,f=this.view.calendar,i=Ba(a),j=new ib(this,{listenStart:function(){e.isDraggingExternal=!0},hitOver:function(a){d=e.computeExternalDrop(a.component.getHitSpan(a),i),d&&!f.isExternalSpanAllowed(e.eventToSpan(d),d,i.eventProps)&&(g(),d=null),d&&e.renderDrag(d)},hitOut:function(){d=null},hitDone:function(){h(),e.unrenderDrag()},dragStop:function(){d&&e.view.reportExternalDrop(i,d,a,b,c)},listenStop:function(){e.isDraggingExternal=!1}});j.startDrag(b)},computeExternalDrop:function(a,b){var c=this.view.calendar,d={start:c.applyTimezone(a.start),end:null};return b.startTime&&!d.start.hasTime()&&d.start.time(b.startTime),b.duration&&(d.end=d.start.clone().add(b.duration)),d},renderDrag:function(a,b){},unrenderDrag:function(){},segResizeMousedown:function(a,b,c){var d,e=this,f=this.view,i=f.calendar,j=a.el,k=a.event,l=i.getEventEnd(k),m=new ib(this,{distance:5,scroll:f.opt("dragScroll"),subjectEl:j,dragStart:function(b){e.triggerSegMouseout(a,b),e.segResizeStart(a,b)},hitOver:function(b,h,j){var m=e.getHitSpan(j),n=e.getHitSpan(b);d=c?e.computeEventStartResize(m,n,k):e.computeEventEndResize(m,n,k),d&&(i.isEventSpanAllowed(e.eventToSpan(d),k)?d.start.isSame(k.start)&&d.end.isSame(l)&&(d=null):(g(),d=null)),d&&(f.hideEvent(k),e.renderEventResize(d,a))},hitOut:function(){d=null},hitDone:function(){e.unrenderEventResize(),f.showEvent(k),h()},dragStop:function(b){e.segResizeStop(a,b),d&&f.reportEventResize(k,d,this.largeUnit,j,b)}});m.mousedown(b)},segResizeStart:function(a,b){this.isResizingSeg=!0,this.view.trigger("eventResizeStart",a.el[0],a.event,b,{})},segResizeStop:function(a,b){this.isResizingSeg=!1,this.view.trigger("eventResizeStop",a.el[0],a.event,b,{})},computeEventStartResize:function(a,b,c){return this.computeEventResize("start",a,b,c)},computeEventEndResize:function(a,b,c){return this.computeEventResize("end",a,b,c)},computeEventResize:function(a,b,c,d){var e,f,g=this.view.calendar,h=this.diffDates(c[a],b[a]);return e={start:d.start.clone(),end:g.getEventEnd(d),allDay:d.allDay},e.allDay&&N(h)&&(e.allDay=!1,g.normalizeEventTimes(e)),e[a].add(h),e.start.isBefore(e.end)||(f=this.minResizeDuration||(d.allDay?g.defaultAllDayEventDuration:g.defaultTimedEventDuration),"start"==a?e.start=e.end.clone().subtract(f):e.end=e.start.clone().add(f)),e},renderEventResize:function(a,b){},unrenderEventResize:function(){},getEventTimeText:function(a,b,c){return null==b&&(b=this.eventTimeFormat),null==c&&(c=this.displayEventEnd),this.displayEventTime&&a.start.hasTime()?c&&a.end?this.view.formatRange(a,b):a.start.format(b):""},getSegClasses:function(a,b,c){var d=a.event,e=["fc-event",a.isStart?"fc-start":"fc-not-start",a.isEnd?"fc-end":"fc-not-end"].concat(d.className,d.source?d.source.className:[]);return b&&e.push("fc-draggable"),c&&e.push("fc-resizable"),e},getSegSkinCss:function(a){var 
b=a.event,c=this.view,d=b.source||{},e=b.color,f=d.color,g=c.opt("eventColor");return{"background-color":b.backgroundColor||e||d.backgroundColor||f||c.opt("eventBackgroundColor")||g,"border-color":b.borderColor||e||d.borderColor||f||c.opt("eventBorderColor")||g,color:b.textColor||d.textColor||c.opt("eventTextColor")}},eventToSegs:function(a){return this.eventsToSegs([a])},eventToSpan:function(a){return this.eventToSpans(a)[0]},eventToSpans:function(a){var b=this.eventToRange(a);return this.eventRangeToSpans(b,a)},eventsToSegs:function(b,c){var d=this,e=za(b),f=[];return a.each(e,function(a,b){var e,g=[];for(e=0;eh&&g.push({start:h,end:c.start}),h=c.end;return f>h&&g.push({start:h,end:f}),g},sortEventSegs:function(a){a.sort(ca(this,"compareEventSegs"))},compareEventSegs:function(a,b){return a.eventStartMS-b.eventStartMS||b.eventDurationMS-a.eventDurationMS||b.event.allDay-a.event.allDay||B(a.event,b.event,this.view.eventOrderSpecs)}}),Pa.isBgEvent=wa,Pa.dataAttrPrefix="";var lb=Pa.DayTableMixin={breakOnWeeks:!1,dayDates:null,dayIndices:null,daysPerRow:null,rowCnt:null,colCnt:null,colHeadFormat:null,updateDayTable:function(){for(var a,b,c,d=this.view,e=this.start.clone(),f=-1,g=[],h=[];e.isBefore(this.end);)d.isHiddenDay(e)?g.push(f+.5):(f++,g.push(f),h.push(e.clone())),e.add(1,"days");if(this.breakOnWeeks){for(b=h[0].day(),a=1;ac?b[0]-1:c>=b.length?b[b.length-1]+1:b[c]},computeColHeadFormat:function(){return this.rowCnt>1||this.colCnt>10?"ddd":this.colCnt>1?this.view.opt("dayOfMonthFormat"):"dddd"},sliceRangeByRow:function(a){var b,c,d,e,f,g=this.daysPerRow,h=this.view.computeDayRange(a),i=this.getDateDayIndex(h.start),j=this.getDateDayIndex(h.end.clone().subtract(1,"days")),k=[];for(b=0;b=e&&k.push({row:b,firstRowDayIndex:e-c,lastRowDayIndex:f-c,isStart:e===i,isEnd:f===j});return k},sliceRangeByDay:function(a){var b,c,d,e,f,g,h=this.daysPerRow,i=this.view.computeDayRange(a),j=this.getDateDayIndex(i.start),k=this.getDateDayIndex(i.end.clone().subtract(1,"days")),l=[];for(b=0;b=e;e++)f=Math.max(j,e),g=Math.min(k,e),f=Math.ceil(f),g=Math.floor(g),g>=f&&l.push({row:b,firstRowDayIndex:f-c,lastRowDayIndex:g-c,isStart:f===j,isEnd:g===k});return l},renderHeadHtml:function(){var a=this.view;return'"},renderHeadIntroHtml:function(){return this.renderIntroHtml()},renderHeadTrHtml:function(){return""+(this.isRTL?"":this.renderHeadIntroHtml())+this.renderHeadDateCellsHtml()+(this.isRTL?this.renderHeadIntroHtml():"")+" "},renderHeadDateCellsHtml:function(){var a,b,c=[];for(a=0;a "},renderIntroHtml:function(){},bookendCells:function(a){var b=this.renderIntroHtml();b&&(this.isRTL?a.append(b):a.prepend(b))}},mb=Pa.DayGrid=kb.extend(lb,{numbersVisible:!1,bottomCoordPadding:0,rowEls:null,cellEls:null,helperEls:null,rowCoordCache:null,colCoordCache:null,renderDates:function(a){var b,c,d=this.view,e=this.rowCnt,f=this.colCnt,g="";for(b=0;e>b;b++)g+=this.renderDayRowHtml(b,a);for(this.el.html(g),this.rowEls=this.el.find(".fc-row"),this.cellEls=this.el.find(".fc-day"),this.rowCoordCache=new gb({els:this.rowEls,isVertical:!0}),this.colCoordCache=new gb({els:this.cellEls.slice(0,this.colCnt),isHorizontal:!0}),b=0;e>b;b++)for(c=0;f>c;c++)d.trigger("dayRender",null,this.getCellDate(b,c),this.getCellEl(b,c))},unrenderDates:function(){this.removeSegPopover()},renderBusinessHours:function(){var a=this.view.calendar.getBusinessHoursEvents(!0),b=this.eventsToSegs(a);this.renderFill("businessHours",b,"bgevent")},renderDayRowHtml:function(a,b){var c=this.view,d=["fc-row","fc-week",c.widgetContentClass];return 
b&&d.push("fc-rigid"),''+this.renderBgTrHtml(a)+'
'+(this.numbersVisible?""+this.renderNumberTrHtml(a)+" ":"")+"
"},renderNumberTrHtml:function(a){return""+(this.isRTL?"":this.renderNumberIntroHtml(a))+this.renderNumberCellsHtml(a)+(this.isRTL?this.renderNumberIntroHtml(a):"")+" "},renderNumberIntroHtml:function(a){return this.renderIntroHtml()},renderNumberCellsHtml:function(a){var b,c,d=[];for(b=0;b'+a.date()+""):" "},computeEventTimeFormat:function(){return this.view.opt("extraSmallTimeFormat")},computeDisplayEventEnd:function(){return 1==this.colCnt},rangeUpdated:function(){this.updateDayTable()},spanToSegs:function(a){var b,c,d=this.sliceRangeByRow(a);for(b=0;b');g=c&&c.row===b?c.el.position().top:h.find(".fc-content-skeleton tbody").position().top,i.css("top",g).find("table").append(d[b].tbodyEl),h.append(i),e.push(i[0])}),this.helperEls=a(e)},unrenderHelper:function(){this.helperEls&&(this.helperEls.remove(),this.helperEls=null)},fillSegTag:"td",renderFill:function(b,c,d){var e,f,g,h=[];for(c=this.renderFillSegEls(b,c),e=0;e'),f=e.find("tr"),h>0&&f.append(' '),f.append(c.el.attr("colspan",i-h)),g>i&&f.append(' '),this.bookendCells(f),e}});mb.mixin({rowStructs:null,unrenderEvents:function(){this.removeSegPopover(),kb.prototype.unrenderEvents.apply(this,arguments)},getEventSegs:function(){return kb.prototype.getEventSegs.call(this).concat(this.popoverSegs||[])},renderBgSegs:function(b){var c=a.grep(b,function(a){return a.event.allDay});return kb.prototype.renderBgSegs.call(this,c)},renderFgSegs:function(b){var c;return b=this.renderFgSegEls(b),c=this.rowStructs=this.renderSegRows(b),this.rowEls.each(function(b,d){a(d).find(".fc-content-skeleton > table").append(c[b].tbodyEl)}),b},unrenderFgSegs:function(){for(var a,b=this.rowStructs||[];a=b.pop();)a.tbodyEl.remove();
+this.rowStructs=null},renderSegRows:function(a){var b,c,d=[];for(b=this.groupSegRows(a),c=0;c'+Y(c)+"")),d=''+(Y(f.title||"")||" ")+" ",''+(this.isRTL?d+" "+l:l+" "+d)+"
"+(h?'
':"")+(i?'
':"")+" "},renderSegRow:function(b,c){function d(b){for(;b>g;)k=(r[e-1]||[])[g],k?k.attr("rowspan",parseInt(k.attr("rowspan")||1,10)+1):(k=a(" "),h.append(k)),q[e][g]=k,r[e][g]=k,g++}var e,f,g,h,i,j,k,l=this.colCnt,m=this.buildSegLevels(c),n=Math.max(1,m.length),o=a(" "),p=[],q=[],r=[];for(e=0;n>e;e++){if(f=m[e],g=0,h=a(" "),p.push([]),q.push([]),r.push([]),f)for(i=0;i ').append(j.el),j.leftCol!=j.rightCol?k.attr("colspan",j.rightCol-j.leftCol+1):r[e][g]=k;g<=j.rightCol;)q[e][g]=k,p[e][g]=j,g++;h.append(k)}d(l),this.bookendCells(h),o.append(h)}return{row:b,tbodyEl:o,cellMatrix:q,segMatrix:p,segLevels:m,segs:c}},buildSegLevels:function(a){var b,c,d,e=[];for(this.sortEventSegs(a),b=0;b td > :first-child").each(c),e.position().top+f>h)return d;return!1},limitRow:function(b,c){function d(d){for(;d>w;)j=t.getCellSegs(b,w,c),j.length&&(m=f[c-1][w],s=t.renderMoreLink(b,w,j),r=a("
").append(s),m.append(r),v.push(r[0])),w++}var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t=this,u=this.rowStructs[b],v=[],w=0;if(c&&c ').attr("rowspan",n),j=l[p],s=this.renderMoreLink(b,i.leftCol+p,[i].concat(j)),r=a("
").append(s),q.append(r),o.push(q[0]),v.push(q[0]);m.addClass("fc-limited").after(a(o)),g.push(m[0])}}d(this.colCnt),u.moreEls=a(v),u.limitedEls=a(g)}},unlimitRow:function(a){var b=this.rowStructs[a];b.moreEls&&(b.moreEls.remove(),b.moreEls=null),b.limitedEls&&(b.limitedEls.removeClass("fc-limited"),b.limitedEls=null)},renderMoreLink:function(b,c,d){var e=this,f=this.view;return a(' ').text(this.getMoreLinkText(d.length)).on("click",function(g){var h=f.opt("eventLimitClick"),i=e.getCellDate(b,c),j=a(this),k=e.getCellEl(b,c),l=e.getCellSegs(b,c),m=e.resliceDaySegs(l,i),n=e.resliceDaySegs(d,i);"function"==typeof h&&(h=f.trigger("eventLimitClick",null,{date:i,dayEl:k,moreEl:j,segs:m,hiddenSegs:n},g)),"popover"===h?e.showSegPopover(b,c,j,m):"string"==typeof h&&f.calendar.zoomTo(i,h)})},showSegPopover:function(a,b,c,d){var e,f,g=this,h=this.view,i=c.parent();e=1==this.rowCnt?h.el:this.rowEls.eq(a),f={className:"fc-more-popover",content:this.renderSegPopoverContent(a,b,d),parentEl:this.el,top:e.offset().top,autoHide:!0,viewportConstrain:h.opt("popoverViewportConstrain"),hide:function(){g.segPopover.removeElement(),g.segPopover=null,g.popoverSegs=null}},this.isRTL?f.right=i.offset().left+i.outerWidth()+1:f.left=i.offset().left-1,this.segPopover=new fb(f),this.segPopover.show()},renderSegPopoverContent:function(b,c,d){var e,f=this.view,g=f.opt("theme"),h=this.getCellDate(b,c).format(f.opt("dayPopoverFormat")),i=a(''),j=i.find(".fc-event-container");for(d=this.renderFgSegEls(d,!0),this.popoverSegs=d,e=0;e'+this.renderBgTrHtml(0)+'
'+this.renderSlatRowHtml()+"
"},renderSlatRowHtml:function(){for(var a,c,d,e=this.view,f=this.isRTL,g="",h=b.duration(+this.minTime);h"+(c?""+Y(a.format(this.labelFormat))+" ":"")+"",g+='"+(f?"":d)+' '+(f?d:"")+" ",h.add(this.slotDuration);return g},processOptions:function(){var c,d=this.view,e=d.opt("slotDuration"),f=d.opt("snapDuration");e=b.duration(e),f=f?b.duration(f):e,this.slotDuration=e,this.snapDuration=f,this.snapsPerSlot=e/f,this.minResizeDuration=f,this.minTime=b.duration(d.opt("minTime")),this.maxTime=b.duration(d.opt("maxTime")),c=d.opt("slotLabelFormat"),a.isArray(c)&&(c=c[c.length-1]),this.labelFormat=c||d.opt("axisFormat")||d.opt("smallTimeFormat"),c=d.opt("slotLabelInterval"),this.labelInterval=c?b.duration(c):this.computeLabelInterval(e)},computeLabelInterval:function(a){var c,d,e;for(c=Db.length-1;c>=0;c--)if(d=b.duration(Db[c]),e=L(d,a),ba(e)&&e>1)return d;return b.duration(a)},computeEventTimeFormat:function(){return this.view.opt("noMeridiemTimeFormat")},computeDisplayEventEnd:function(){return!0},prepareHits:function(){this.colCoordCache.build(),this.slatCoordCache.build()},releaseHits:function(){this.colCoordCache.clear()},queryHit:function(a,b){var c=this.snapsPerSlot,d=this.colCoordCache,e=this.slatCoordCache,f=d.getHorizontalIndex(a),g=e.getVerticalIndex(b);if(null!=f&&null!=g){var h=e.getTopOffset(g),i=e.getHeight(g),j=(b-h)/i,k=Math.floor(j*c),l=g*c+k,m=h+k/c*i,n=h+(k+1)/c*i;return{col:f,snap:l,component:this,left:d.getLeftOffset(f),right:d.getRightOffset(f),top:m,bottom:n}}},getHitSpan:function(a){var b,c=this.getCellDate(0,a.col),d=this.computeSnapTime(a.snap);return c.time(d),b=c.clone().add(this.snapDuration),{start:c,end:b}},getHitEl:function(a){return this.colEls.eq(a.col)},rangeUpdated:function(){this.updateDayTable()},computeSnapTime:function(a){return b.duration(this.minTime+this.snapDuration*a)},spanToSegs:function(a){var b,c=this.sliceRangeByTimes(a);for(b=0;b').css("top",e).appendTo(this.colContainerEls.eq(d[c].col))[0]);d.length>0&&f.push(a('
').css("top",e).appendTo(this.el.find(".fc-content-skeleton"))[0]),this.nowIndicatorEls=a(f)},unrenderNowIndicator:function(){this.nowIndicatorEls&&(this.nowIndicatorEls.remove(),this.nowIndicatorEls=null)},renderSelection:function(a){this.view.opt("selectHelper")?this.renderEventLocationHelper(a):this.renderHighlight(a)},unrenderSelection:function(){this.unrenderHelper(),this.unrenderHighlight()},renderHighlight:function(a){this.renderHighlightSegs(this.spanToSegs(a))},unrenderHighlight:function(){this.unrenderHighlightSegs()}});nb.mixin({colContainerEls:null,fgContainerEls:null,bgContainerEls:null,helperContainerEls:null,highlightContainerEls:null,businessContainerEls:null,fgSegs:null,bgSegs:null,helperSegs:null,highlightSegs:null,businessSegs:null,renderContentSkeleton:function(){var b,c,d="";for(b=0;b';c=a('"),this.colContainerEls=c.find(".fc-content-col"),this.helperContainerEls=c.find(".fc-helper-container"),this.fgContainerEls=c.find(".fc-event-container:not(.fc-helper-container)"),this.bgContainerEls=c.find(".fc-bgevent-container"),this.highlightContainerEls=c.find(".fc-highlight-container"),this.businessContainerEls=c.find(".fc-business-container"),this.bookendCells(c.find("tr")),this.el.append(c)},renderFgSegs:function(a){return a=this.renderFgSegsIntoContainers(a,this.fgContainerEls),this.fgSegs=a,a},unrenderFgSegs:function(){this.unrenderNamedSegs("fgSegs")},renderHelperSegs:function(a,b){var c,d,e;for(a=this.renderFgSegsIntoContainers(a,this.helperContainerEls),c=0;c'+(c?'
'+Y(c)+"
":"")+(g.title?'
'+Y(g.title)+"
":"")+'
'+(j?'
':"")+""},updateSegVerticals:function(a){this.computeSegVerticals(a),this.assignSegVerticals(a)},computeSegVerticals:function(a){var b,c;for(b=0;b1?"ll":"LL"},formatRange:function(a,b,c){var d=a.end;return d.hasTime()||(d=d.clone().subtract(1)),ma(a.start,d,b,c,this.opt("isRTL"))},setElement:function(a){this.el=a,this.bindGlobalHandlers()},removeElement:function(){this.clear(),this.isSkeletonRendered&&(this.unrenderSkeleton(),this.isSkeletonRendered=!1),this.unbindGlobalHandlers(),this.el.remove()},display:function(b){var c=this,d=null;return this.displaying&&(d=this.queryScroll()),this.calendar.freezeContentHeight(),this.clear().then(function(){return c.displaying=a.when(c.displayView(b)).then(function(){c.forceScroll(c.computeInitialScroll(d)),c.calendar.unfreezeContentHeight(),c.triggerRender()})})},clear:function(){var b=this,c=this.displaying;return c?c.then(function(){return b.displaying=null,b.clearEvents(),b.clearView()}):a.when()},displayView:function(a){this.isSkeletonRendered||(this.renderSkeleton(),this.isSkeletonRendered=!0),a&&this.setDate(a),this.render&&this.render(),this.renderDates(),this.updateSize(),this.renderBusinessHours(),this.startNowIndicator()},clearView:function(){this.unselect(),this.stopNowIndicator(),this.triggerUnrender(),this.unrenderBusinessHours(),this.unrenderDates(),this.destroy&&this.destroy()},renderSkeleton:function(){},unrenderSkeleton:function(){},renderDates:function(){},unrenderDates:function(){},triggerRender:function(){this.trigger("viewRender",this,this,this.el)},triggerUnrender:function(){this.trigger("viewDestroy",this,this,this.el)},bindGlobalHandlers:function(){a(document).on("mousedown",this.documentMousedownProxy)},unbindGlobalHandlers:function(){a(document).off("mousedown",this.documentMousedownProxy)},initThemingProps:function(){var a=this.opt("theme")?"ui":"fc";this.widgetHeaderClass=a+"-widget-header",this.widgetContentClass=a+"-widget-content",this.highlightStateClass=a+"-state-highlight"},renderBusinessHours:function(){},unrenderBusinessHours:function(){},startNowIndicator:function(){var a,c,d,e=this;this.opt("nowIndicator")&&(a=this.getNowIndicatorUnit(),a&&(c=ca(this,"updateNowIndicator"),this.initialNowDate=this.calendar.getNow(),this.initialNowQueriedMs=+new Date,this.renderNowIndicator(this.initialNowDate),this.isNowIndicatorRendered=!0,d=this.initialNowDate.clone().startOf(a).add(1,a)-this.initialNowDate,this.nowIndicatorTimeoutID=setTimeout(function(){e.nowIndicatorTimeoutID=null,c(),d=+b.duration(1,a),d=Math.max(100,d),e.nowIndicatorIntervalID=setInterval(c,d)},d)))},updateNowIndicator:function(){this.isNowIndicatorRendered&&(this.unrenderNowIndicator(),this.renderNowIndicator(this.initialNowDate.clone().add(new Date-this.initialNowQueriedMs)))},stopNowIndicator:function(){this.isNowIndicatorRendered&&(this.nowIndicatorTimeoutID&&(clearTimeout(this.nowIndicatorTimeoutID),this.nowIndicatorTimeoutID=null),this.nowIndicatorIntervalID&&(clearTimeout(this.nowIndicatorIntervalID),this.nowIndicatorIntervalID=null),this.unrenderNowIndicator(),this.isNowIndicatorRendered=!1)},getNowIndicatorUnit:function(){},renderNowIndicator:function(a){},unrenderNowIndicator:function(){},updateSize:function(a){var b;a&&(b=this.queryScroll()),this.updateHeight(a),this.updateWidth(a),this.updateNowIndicator(),a&&this.setScroll(b)},updateWidth:function(a){},updateHeight:function(a){var b=this.calendar;this.setHeight(b.getSuggestedViewHeight(),b.isHeightAuto())},setHeight:function(a,b){},computeScrollerHeight:function(a){var 
b,c,d=this.scrollerEl;return b=this.el.add(d),b.css({position:"relative",left:-1}),c=this.el.outerHeight()-d.height(),b.css({position:"",left:""}),a-c},computeInitialScroll:function(a){return 0},queryScroll:function(){return this.scrollerEl?this.scrollerEl.scrollTop():void 0},setScroll:function(a){return this.scrollerEl?this.scrollerEl.scrollTop(a):void 0},forceScroll:function(a){var b=this;this.setScroll(a),setTimeout(function(){b.setScroll(a)},0)},displayEvents:function(a){var b=this.queryScroll();this.clearEvents(),this.renderEvents(a),this.isEventsRendered=!0,this.setScroll(b),this.triggerEventRender()},clearEvents:function(){var a;this.isEventsRendered&&(a=this.queryScroll(),this.triggerEventUnrender(),this.destroyEvents&&this.destroyEvents(),this.unrenderEvents(),this.setScroll(a),this.isEventsRendered=!1)},renderEvents:function(a){},unrenderEvents:function(){},triggerEventRender:function(){this.renderedEventSegEach(function(a){this.trigger("eventAfterRender",a.event,a.event,a.el)}),this.trigger("eventAfterAllRender")},triggerEventUnrender:function(){this.renderedEventSegEach(function(a){this.trigger("eventDestroy",a.event,a.event,a.el)})},resolveEventEl:function(b,c){var d=this.trigger("eventRender",b,b,c);return d===!1?c=null:d&&d!==!0&&(c=a(d)),c},showEvent:function(a){this.renderedEventSegEach(function(a){a.el.css("visibility","")},a)},hideEvent:function(a){this.renderedEventSegEach(function(a){a.el.css("visibility","hidden")},a)},renderedEventSegEach:function(a,b){var c,d=this.getEventSegs();for(c=0;cb;b++)(d[b]=-1!==a.inArray(b,c))||e++;if(!e)throw"invalid hiddenDays";this.isHiddenDayHash=d},isHiddenDay:function(a){return b.isMoment(a)&&(a=a.day()),this.isHiddenDayHash[a]},skipHiddenDays:function(a,b,c){var d=a.clone();for(b=b||1;this.isHiddenDayHash[(d.day()+(c?b:0)+7)%7];)d.add(b,"days");return d},computeDayRange:function(a){var b,c=a.start.clone().stripTime(),d=a.end,e=null;return d&&(e=d.clone().stripTime(),b=+d.time(),b&&b>=this.nextDayThreshold&&e.add(1,"days")),(!d||c>=e)&&(e=c.clone().add(1,"days")),{start:c,end:e}},isMultiDayEvent:function(a){var b=this.computeDayRange(a);return b.end.diff(b.start,"days")>1}}),pb=Pa.Calendar=ra.extend({dirDefaults:null,langDefaults:null,overrides:null,options:null,viewSpecCache:null,view:null,header:null,loadingLevel:0,constructor:Ja,initialize:function(){},initOptions:function(a){var b,e,f,g;a=d(a),b=a.lang,e=qb[b],e||(b=pb.defaults.lang,e=qb[b]||{}),f=X(a.isRTL,e.isRTL,pb.defaults.isRTL),g=f?pb.rtlDefaults:{},this.dirDefaults=g,this.langDefaults=e,this.overrides=a,this.options=c([pb.defaults,g,e,a]),Ka(this.options),this.viewSpecCache={}},getViewSpec:function(a){var b=this.viewSpecCache;return b[a]||(b[a]=this.buildViewSpec(a))},getUnitViewSpec:function(b){var 
c,d,e;if(-1!=a.inArray(b,Ua))for(c=this.header.getViewsWithButtons(),a.each(Pa.views,function(a){c.push(a)}),d=0;d1,this.weekNumbersVisible=this.opt("weekNumbers"),this.dayGrid.numbersVisible=this.dayNumbersVisible||this.weekNumbersVisible,this.el.addClass("fc-basic-view").html(this.renderSkeletonHtml()),this.renderHead(),this.scrollerEl=this.el.find(".fc-day-grid-container"),this.dayGrid.setElement(this.el.find(".fc-day-grid")),this.dayGrid.renderDates(this.hasRigidRows())},renderHead:function(){this.headContainerEl=this.el.find(".fc-head-container").html(this.dayGrid.renderHeadHtml()),this.headRowEl=this.headContainerEl.find(".fc-row")},unrenderDates:function(){this.dayGrid.unrenderDates(),this.dayGrid.removeElement()},renderBusinessHours:function(){this.dayGrid.renderBusinessHours()},renderSkeletonHtml:function(){return''},weekNumberStyleAttr:function(){return null!==this.weekNumberWidth?'style="width:'+this.weekNumberWidth+'px"':""},hasRigidRows:function(){var a=this.opt("eventLimit");return a&&"number"!=typeof a},updateWidth:function(){this.weekNumbersVisible&&(this.weekNumberWidth=k(this.el.find(".fc-week-number")))},setHeight:function(a,b){var c,d=this.opt("eventLimit");m(this.scrollerEl),f(this.headRowEl),this.dayGrid.removeSegPopover(),d&&"number"==typeof d&&this.dayGrid.limitRows(d),c=this.computeScrollerHeight(a),this.setGridHeight(c,b),d&&"number"!=typeof d&&this.dayGrid.limitRows(d),!b&&l(this.scrollerEl,c)&&(e(this.headRowEl,r(this.scrollerEl)),c=this.computeScrollerHeight(a),this.scrollerEl.height(c))},setGridHeight:function(a,b){b?j(this.dayGrid.rowEls):i(this.dayGrid.rowEls,a,!0)},prepareHits:function(){this.dayGrid.prepareHits()},releaseHits:function(){this.dayGrid.releaseHits()},queryHit:function(a,b){return this.dayGrid.queryHit(a,b)},getHitSpan:function(a){return this.dayGrid.getHitSpan(a)},getHitEl:function(a){return this.dayGrid.getHitEl(a)},renderEvents:function(a){this.dayGrid.renderEvents(a),this.updateHeight()},getEventSegs:function(){return this.dayGrid.getEventSegs()},unrenderEvents:function(){this.dayGrid.unrenderEvents()},renderDrag:function(a,b){return this.dayGrid.renderDrag(a,b)},unrenderDrag:function(){this.dayGrid.unrenderDrag()},renderSelection:function(a){this.dayGrid.renderSelection(a)},unrenderSelection:function(){this.dayGrid.unrenderSelection()}}),xb={renderHeadIntroHtml:function(){var a=this.view;return a.weekNumbersVisible?'":""},renderNumberIntroHtml:function(a){var b=this.view;return b.weekNumbersVisible?'"+this.getCellDate(a,0).format("w")+" ":""},renderBgIntroHtml:function(){var a=this.view;return a.weekNumbersVisible?' ":""},renderIntroHtml:function(){var a=this.view;return a.weekNumbersVisible?' 
":""}},yb=Pa.MonthView=wb.extend({computeRange:function(a){var b,c=wb.prototype.computeRange.call(this,a);return this.isFixedWeeks()&&(b=Math.ceil(c.end.diff(c.start,"weeks",!0)),c.end.add(6-b,"weeks")),c},setGridHeight:function(a,b){b=b||"variable"===this.opt("weekMode"),b&&(a*=this.rowCnt/6),i(this.dayGrid.rowEls,a,!b)},isFixedWeeks:function(){var a=this.opt("weekMode");return a?"fixed"===a:this.opt("fixedWeekCount")}});Qa.basic={"class":wb},Qa.basicDay={type:"basic",duration:{days:1}},Qa.basicWeek={type:"basic",duration:{weeks:1}},Qa.month={"class":yb,duration:{months:1},defaults:{fixedWeekCount:!0}};var zb=Pa.AgendaView=ob.extend({timeGridClass:nb,timeGrid:null,dayGridClass:mb,dayGrid:null,axisWidth:null,headContainerEl:null,noScrollRowEls:null,bottomRuleEl:null,bottomRuleHeight:null,initialize:function(){this.timeGrid=this.instantiateTimeGrid(),this.opt("allDaySlot")&&(this.dayGrid=this.instantiateDayGrid())},instantiateTimeGrid:function(){var a=this.timeGridClass.extend(Ab);return new a(this)},instantiateDayGrid:function(){var a=this.dayGridClass.extend(Bb);return new a(this)},setRange:function(a){ob.prototype.setRange.call(this,a),this.timeGrid.setRange(a),this.dayGrid&&this.dayGrid.setRange(a)},renderDates:function(){this.el.addClass("fc-agenda-view").html(this.renderSkeletonHtml()),this.renderHead(),this.scrollerEl=this.el.find(".fc-time-grid-container"),this.timeGrid.setElement(this.el.find(".fc-time-grid")),this.timeGrid.renderDates(),this.bottomRuleEl=a('').appendTo(this.timeGrid.el),this.dayGrid&&(this.dayGrid.setElement(this.el.find(".fc-day-grid")),this.dayGrid.renderDates(),this.dayGrid.bottomCoordPadding=this.dayGrid.el.next("hr").outerHeight()),this.noScrollRowEls=this.el.find(".fc-row:not(.fc-scroller *)")},renderHead:function(){this.headContainerEl=this.el.find(".fc-head-container").html(this.timeGrid.renderHeadHtml())},unrenderDates:function(){this.timeGrid.unrenderDates(),this.timeGrid.removeElement(),this.dayGrid&&(this.dayGrid.unrenderDates(),this.dayGrid.removeElement())},renderSkeletonHtml:function(){return''},axisStyleAttr:function(){return null!==this.axisWidth?'style="width:'+this.axisWidth+'px"':""},renderBusinessHours:function(){this.timeGrid.renderBusinessHours(),this.dayGrid&&this.dayGrid.renderBusinessHours()},unrenderBusinessHours:function(){this.timeGrid.unrenderBusinessHours(),this.dayGrid&&this.dayGrid.unrenderBusinessHours()},getNowIndicatorUnit:function(){return this.timeGrid.getNowIndicatorUnit()},renderNowIndicator:function(a){this.timeGrid.renderNowIndicator(a)},unrenderNowIndicator:function(){this.timeGrid.unrenderNowIndicator()},updateSize:function(a){this.timeGrid.updateSize(a),ob.prototype.updateSize.call(this,a)},updateWidth:function(){this.axisWidth=k(this.el.find(".fc-axis"))},setHeight:function(a,b){var c,d;null===this.bottomRuleHeight&&(this.bottomRuleHeight=this.bottomRuleEl.outerHeight()),this.bottomRuleEl.hide(),this.scrollerEl.css("overflow",""),m(this.scrollerEl),f(this.noScrollRowEls),this.dayGrid&&(this.dayGrid.removeSegPopover(),c=this.opt("eventLimit"),c&&"number"!=typeof c&&(c=Cb),c&&this.dayGrid.limitRows(c)),b||(d=this.computeScrollerHeight(a),l(this.scrollerEl,d)?(e(this.noScrollRowEls,r(this.scrollerEl)),d=this.computeScrollerHeight(a),this.scrollerEl.height(d)):(this.scrollerEl.height(d).css("overflow","hidden"),this.bottomRuleEl.show()))},computeInitialScroll:function(){var a=b.duration(this.opt("scrollTime")),c=this.timeGrid.computeTimeTop(a);return 
c=Math.ceil(c),c&&c++,c},prepareHits:function(){this.timeGrid.prepareHits(),this.dayGrid&&this.dayGrid.prepareHits()},releaseHits:function(){this.timeGrid.releaseHits(),this.dayGrid&&this.dayGrid.releaseHits()},queryHit:function(a,b){var c=this.timeGrid.queryHit(a,b);return!c&&this.dayGrid&&(c=this.dayGrid.queryHit(a,b)),c},getHitSpan:function(a){return a.component.getHitSpan(a)},getHitEl:function(a){return a.component.getHitEl(a)},renderEvents:function(a){var b,c,d=[],e=[],f=[];for(c=0;c